//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace  {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
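  ///
  /// For example, for a call like "x = returnsStruct();" the callee can
  /// usually construct its result directly in 'x'; the two cases above are
  /// the ones where we instead return into a temporary and copy afterwards.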
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then copies the result into the destination slot.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then copies the result into the destination slot.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
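///
/// For example, under -fobjc-gc a struct with a __strong object-pointer
/// member generally needs to be copied with the collector-aware memmove
/// entry point rather than with a plain memcpy.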
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, do a final copy.
  assert(Dest.getAddr() != Src.getAggregateAddr());
  EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy is volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified());
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our handling
    // of assignments, where we store directly into the LHS without checking
    // whether anything in the RHS aliases.
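    // For instance, in "s = (struct S){ s.b, s.a };" the compound literal
    // reads fields of the very object it is being assigned into.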
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}


void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
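    // For example, given "union U { int i; float f; };", the GCC extension
    // "(union U)1" builds a U whose 'i' member is initialized in place.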
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(E->getSubExpr());
    assert(LV.isPropertyRef());
    RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
    EmitMoveFromReturnSlot(E, RV);
    break;
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  llvm_unreachable("direct property access not surrounded by "
                   "lvalue-to-rvalue cast");
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When the LHS is a __block variable, the RHS must be evaluated first,
        // as it may change the 'forwarding' field via a call to Block_copy.
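        // For instance, if the RHS calls a function that performs Block_copy
        // on the variable, the __block storage may move to the heap before
        // the assignment's store takes place.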
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                       needsGC(E->getLHS()->getType()),
                                       AggValueSlot::IsAliased);
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
    QualType ArgType = RE->getSetterArgType();
    RValue Src;
    if (ArgType->isReferenceType())
      Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
    else {
      AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
      CGF.EmitAggExpr(E->getRHS(), Slot);
      Src = Slot.asRValue();
    }
    CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
  } else {
    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot =
      AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                              needsGC(E->getLHS()->getType()),
                              AggValueSlot::IsAliased);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  Dest = EnsureSlot(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.EmitExprWithCleanups(E, Dest);
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
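  // For example, if CheckAggExprForMemSetUse already emitted a memset over
  // the whole destination, a field that is zero-initialized anyway can be
  // skipped entirely.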
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    CGF.EmitStoreThroughLValue(RValue::get(null), lv);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                   "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                               fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1]);
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
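///
/// For example, for "struct { int x; int pad[31]; } s = { 7 };" this would
/// report roughly 4 non-zero bytes even though the object spans 128 bytes.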
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().getTargetInfo().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }


  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
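/// For example, a mostly-zero initializer such as
///   struct S { int a[128]; } s = { { 1, 2 } };
/// is cheaper to emit as one memset of the whole object followed by the two
/// explicit element stores.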
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());

  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}




/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed; otherwise
/// the slot must provide a valid address to emit into.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
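  //
  // For example, a POD struct assignment "a = b;" ultimately reaches this
  // point and is lowered to a single llvm.memcpy of the type's size and
  // alignment (or to the GC-aware memmove path below when that is required).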

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       TypeInfo.second.getQuantity(), isVolatile);
}