//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                            ConstStructBuilder
//===----------------------------------------------------------------------===//

namespace {
class ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;
  CharUnits NextFieldOffsetInChars;
  CharUnits LLVMStructAlignment;
  SmallVector<llvm::Constant *, 32> Elements;
public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     InitListExpr *ILE);
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     const APValue &Value, QualType ValTy);

private:
  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false),
    NextFieldOffsetInChars(CharUnits::Zero()),
    LLVMStructAlignment(CharUnits::One()) { }

  void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr);

  void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);

  void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::ConstantInt *InitExpr);

  void AppendPadding(CharUnits PadSize);

  void AppendTailPadding(CharUnits RecordSize);

  void ConvertStructToPacked();

  bool Build(InitListExpr *ILE);
  void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
             const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
  llvm::Constant *Finalize(QualType Ty);

  CharUnits getAlignment(const llvm::Constant *C) const {
    if (Packed)  return CharUnits::One();
    return CharUnits::fromQuantity(
        CGM.getDataLayout().getABITypeAlignment(C->getType()));
  }

  CharUnits getSizeInChars(const llvm::Constant *C) const {
    return CharUnits::fromQuantity(
        CGM.getDataLayout().getTypeAllocSize(C->getType()));
  }
};

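/// Append a field initializer to the struct being built, given the field's
/// offset in bits from the start of the record.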
void ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
            llvm::Constant *InitCst) {
  const ASTContext &Context = CGM.getContext();

  CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);

  AppendBytes(FieldOffsetInChars, InitCst);
}

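/// Append an initializer that must start at the given byte offset, first
/// converting the struct to a packed struct or inserting padding if the
/// constant would not otherwise land at that offset.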
void ConstStructBuilder::
AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {

  assert(NextFieldOffsetInChars <= FieldOffsetInChars
         && "Field offset mismatch!");

  CharUnits FieldAlignment = getAlignment(InitCst);

  // Round up the field offset to the alignment of the field type.
  CharUnits AlignedNextFieldOffsetInChars =
    NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);

  if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
    assert(!Packed && "Alignment is wrong even with a packed struct!");

    // Convert the struct to a packed struct.
    ConvertStructToPacked();

    AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
  }

  if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
    // We need to append padding.
    AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);

    assert(NextFieldOffsetInChars == FieldOffsetInChars &&
           "Did not add enough padding!");

    AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
  }

  // Add the field.
  Elements.push_back(InitCst);
  NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
                           getSizeInChars(InitCst);

  if (Packed)
    assert(LLVMStructAlignment == CharUnits::One() &&
           "Packed struct not byte-aligned!");
  else
    LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
}

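/// Append a bit-field initializer. The value is split across the partially
/// filled previous byte (if any), any whole bytes, and a final partial byte,
/// honoring the target's endianness.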
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset,
                                        llvm::ConstantInt *CI) {
  const ASTContext &Context = CGM.getContext();
  const uint64_t CharWidth = Context.getCharWidth();
  uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
  if (FieldOffset > NextFieldOffsetInBits) {
    // We need to add padding.
    CharUnits PadSize = Context.toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
                               Context.getTargetInfo().getCharAlign()));

    AppendPadding(PadSize);
  }

  uint64_t FieldSize = Field->getBitWidthValue(Context);

  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary.
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (FieldSize > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(FieldSize);

  // Truncate the size of FieldValue to the bit field size.
  if (FieldSize < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(FieldSize);

  NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
  if (FieldOffset < NextFieldOffsetInBits) {
    // Either part of the field or the entire field can go into the previous
    // byte.
    assert(!Elements.empty() && "Elements can't be empty!");

    unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;

    bool FitsCompletelyInPreviousByte =
      BitsInPreviousByte >= FieldValue.getBitWidth();

    llvm::APInt Tmp = FieldValue;

    if (!FitsCompletelyInPreviousByte) {
      unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

      if (CGM.getDataLayout().isBigEndian()) {
        Tmp = Tmp.lshr(NewFieldWidth);
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining high bits.
        FieldValue = FieldValue.trunc(NewFieldWidth);
      } else {
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining low bits.
        FieldValue = FieldValue.lshr(BitsInPreviousByte);
        FieldValue = FieldValue.trunc(NewFieldWidth);
      }
    }

    Tmp = Tmp.zext(CharWidth);
    if (CGM.getDataLayout().isBigEndian()) {
      if (FitsCompletelyInPreviousByte)
        Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
    } else {
      Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
    }

    // 'or' in the bits that go into the previous byte.
    llvm::Value *LastElt = Elements.back();
    if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
      Tmp |= Val->getValue();
    else {
      assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case, we just replace it with our field) or it
      // is an array.  If it is an array, we have to pull one byte off the
      // array so that the other undef bytes stay around.
      if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array; create a new smaller
        // padding and then a hole for our i8 to get plopped into.
        assert(isa<llvm::ArrayType>(LastElt->getType()) &&
               "Expected array padding of undefs");
        llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
        assert(AT->getElementType()->isIntegerTy(CharWidth) &&
               AT->getNumElements() != 0 &&
               "Expected non-empty array padding of undefs");

        // Remove the padding array.
        NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
        Elements.pop_back();

        // Add the padding back in two chunks.
        AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
        AppendPadding(CharUnits::One());
        assert(isa<llvm::UndefValue>(Elements.back()) &&
               Elements.back()->getType()->isIntegerTy(CharWidth) &&
               "Padding addition didn't work right");
      }
    }

    Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

    if (FitsCompletelyInPreviousByte)
      return;
  }

  while (FieldValue.getBitWidth() > CharWidth) {
    llvm::APInt Tmp;

    if (CGM.getDataLayout().isBigEndian()) {
      // We want the high bits.
      Tmp =
        FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
    } else {
      // We want the low bits.
      Tmp = FieldValue.trunc(CharWidth);

      FieldValue = FieldValue.lshr(CharWidth);
    }

    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
    ++NextFieldOffsetInChars;

    FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
  }

  assert(FieldValue.getBitWidth() > 0 &&
         "Should have at least one bit left!");
  assert(FieldValue.getBitWidth() <= CharWidth &&
         "Should not have more than a byte left!");

  if (FieldValue.getBitWidth() < CharWidth) {
    if (CGM.getDataLayout().isBigEndian()) {
      unsigned BitWidth = FieldValue.getBitWidth();

      FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
    } else
      FieldValue = FieldValue.zext(CharWidth);
  }

  // Append the last element.
  Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                            FieldValue));
  ++NextFieldOffsetInChars;
}

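/// Append PadSize bytes of undef padding, as a single i8 or an array of i8.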
void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
  if (PadSize.isZero())
    return;

  llvm::Type *Ty = CGM.Int8Ty;
  if (PadSize > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());

  llvm::Constant *C = llvm::UndefValue::get(Ty);
  Elements.push_back(C);
  assert(getAlignment(C) == CharUnits::One() &&
         "Padding must have 1 byte alignment!");

  NextFieldOffsetInChars += getSizeInChars(C);
}

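/// Pad the struct out to the total size of the record layout.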
void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
  assert(NextFieldOffsetInChars <= RecordSize &&
         "Size mismatch!");

  AppendPadding(RecordSize - NextFieldOffsetInChars);
}

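/// Rebuild the element list with explicit padding so the result can be
/// emitted as a packed (byte-aligned) LLVM struct without changing any
/// element's offset.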
void ConstStructBuilder::ConvertStructToPacked() {
  SmallVector<llvm::Constant *, 16> PackedElements;
  CharUnits ElementOffsetInChars = CharUnits::Zero();

  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    llvm::Constant *C = Elements[i];

    CharUnits ElementAlign = CharUnits::fromQuantity(
      CGM.getDataLayout().getABITypeAlignment(C->getType()));
    CharUnits AlignedElementOffsetInChars =
      ElementOffsetInChars.RoundUpToAlignment(ElementAlign);

    if (AlignedElementOffsetInChars > ElementOffsetInChars) {
      // We need some padding.
      CharUnits NumChars =
        AlignedElementOffsetInChars - ElementOffsetInChars;

      llvm::Type *Ty = CGM.Int8Ty;
      if (NumChars > CharUnits::One())
        Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());

      llvm::Constant *Padding = llvm::UndefValue::get(Ty);
      PackedElements.push_back(Padding);
      ElementOffsetInChars += getSizeInChars(Padding);
    }

    PackedElements.push_back(C);
    ElementOffsetInChars += getSizeInChars(C);
  }

  assert(ElementOffsetInChars == NextFieldOffsetInChars &&
         "Packing the struct changed its size!");

  Elements.swap(PackedElements);
  LLVMStructAlignment = CharUnits::One();
  Packed = true;
}

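/// Build the struct from an InitListExpr, emitting each field's initializer
/// (or a null constant for fields without one). Returns false if any
/// initializer cannot be emitted as a constant.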
bool ConstStructBuilder::Build(InitListExpr *ILE) {
  RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  unsigned FieldNo = 0;
  unsigned ElementNo = 0;

  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
      continue;

    // Don't emit anonymous bitfields; they just affect layout.
    if (Field->isUnnamedBitfield())
      continue;

    // Get the initializer.  A struct can include fields without initializers;
    // we just use explicit null values for them.
    llvm::Constant *EltInit;
    if (ElementNo < ILE->getNumInits())
      EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
                                     Field->getType(), CGF);
    else
      EltInit = CGM.EmitNullConstant(Field->getType());

    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                     cast<llvm::ConstantInt>(EltInit));
    }
  }

  return true;
}

namespace {
struct BaseInfo {
  BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
    : Decl(Decl), Offset(Offset), Index(Index) {
  }

  const CXXRecordDecl *Decl;
  CharUnits Offset;
  unsigned Index;

  bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
};
}

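/// Build the struct from an already-evaluated APValue, laying down a vtable
/// pointer if one is needed, then the non-virtual bases in address order,
/// then the fields.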
void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
                               bool IsPrimaryBase,
                               const CXXRecordDecl *VTableClass,
                               CharUnits Offset) {
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
    // Add a vtable pointer, if we need one and it hasn't already been added.
    if (CD->isDynamicClass() && !IsPrimaryBase) {
      llvm::Constant *VTableAddressPoint =
          CGM.getCXXABI().getVTableAddressPointForConstExpr(
              BaseSubobject(CD, Offset), VTableClass);
      AppendBytes(Offset, VTableAddressPoint);
    }

    // Accumulate and sort bases, in order to visit them in address order,
    // which may not be the same as declaration order.
    SmallVector<BaseInfo, 8> Bases;
    Bases.reserve(CD->getNumBases());
    unsigned BaseNo = 0;
    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
         BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
      assert(!Base->isVirtual() && "should not have virtual bases here");
      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
      CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
      Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
    }
    std::stable_sort(Bases.begin(), Bases.end());

    for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
      BaseInfo &Base = Bases[I];

      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
      Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
            VTableClass, Offset + Base.Offset);
    }
  }

  unsigned FieldNo = 0;
  uint64_t OffsetBits = CGM.getContext().toBits(Offset);

  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && Val.getUnionField() != *Field)
      continue;

    // Don't emit anonymous bitfields; they just affect layout.
    if (Field->isUnnamedBitfield())
      continue;

    // Emit the value of the initializer.
    const APValue &FieldValue =
      RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
    llvm::Constant *EltInit =
      CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
    assert(EltInit && "EmitConstantValue can't fail");

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
                     cast<llvm::ConstantInt>(EltInit));
    }
  }
}

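/// Finish the struct: add any tail padding, pack the struct if the LLVM type
/// would otherwise be larger than the record layout, and return the final
/// constant.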
llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
  RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  CharUnits LayoutSizeInChars = Layout.getSize();

  if (NextFieldOffsetInChars > LayoutSizeInChars) {
    // If the struct is bigger than the size of the record type,
    // we must have a flexible array member at the end.
    assert(RD->hasFlexibleArrayMember() &&
           "Must have flexible array member if struct is bigger than type!");

    // No tail padding is necessary.
  } else {
    // Append tail padding if necessary.
    AppendTailPadding(LayoutSizeInChars);

    CharUnits LLVMSizeInChars =
      NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);

    // Check if we need to convert the struct to a packed struct.
    if (NextFieldOffsetInChars <= LayoutSizeInChars &&
        LLVMSizeInChars > LayoutSizeInChars) {
      assert(!Packed && "Size mismatch!");

      ConvertStructToPacked();
      assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
             "Converting to packed did not help!");
    }

    assert(LayoutSizeInChars == NextFieldOffsetInChars &&
           "Tail padding mismatch!");
  }

  // Pick the type to use.  If the type is layout-identical to the type
  // ConvertType produces, use that; otherwise use whatever the builder
  // produced for us.
  llvm::StructType *STy =
      llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
                                               Elements, Packed);
  llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
  if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
    if (ValSTy->isLayoutIdentical(STy))
      STy = ValSTy;
  }

  llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);

  assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
         getSizeInChars(Result) && "Size mismatch!");

  return Result;
}

llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                InitListExpr *ILE) {
  ConstStructBuilder Builder(CGM, CGF);

  if (!Builder.Build(ILE))
    return 0;

  return Builder.Finalize(ILE->getType());
}

llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                const APValue &Val,
                                                QualType ValTy) {
  ConstStructBuilder Builder(CGM, CGF);

  const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
  Builder.Build(Val, RD, false, CD, CharUnits::Zero());

  return Builder.Finalize(ValTy);
}


//===----------------------------------------------------------------------===//
//                             ConstExprEmitter
//===----------------------------------------------------------------------===//

/// This class only needs to handle two cases:
/// 1) Literals (this is used by APValue emission to emit literals).
/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
///    constant fold these types).
class ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *
  VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
    return Visit(PE->getReplacement());
  }

  llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }

  llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
    return Visit(CE->getChosenSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    Expr *subExpr = E->getSubExpr();
    llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
    if (!C) return 0;

    llvm::Type *destType = ConvertType(E->getType());

    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");

      // Build a struct with the union sub-element as the first member,
      // padded to the appropriate size.
      SmallVector<llvm::Constant*, 2> Elts;
      SmallVector<llvm::Type*, 2> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        llvm::Type *Ty = CGM.Int8Ty;
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }

    case CK_LValueToRValue:
    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_ConstructorConversion:
      return C;

    case CK_Dependent: llvm_unreachable("saw dependent cast!");

    case CK_BuiltinFnToFnPtr:
      llvm_unreachable("builtin functions are handled elsewhere");

    case CK_ReinterpretMemberPointer:
    case CK_DerivedToBaseMemberPointer:
    case CK_BaseToDerivedMemberPointer:
      return CGM.getCXXABI().EmitMemberPointerConversion(E, C);

    // These will never be supported.
    case CK_ObjCObjectLValueCast:
    case CK_ARCProduceObject:
    case CK_ARCConsumeObject:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCExtendBlockObject:
    case CK_CopyAndAutoreleaseBlockObject:
      return 0;

    // These don't need to be handled here because Evaluate knows how to
    // evaluate them in the cases where they can be folded.
    case CK_BitCast:
    case CK_ToVoid:
    case CK_Dynamic:
    case CK_LValueBitCast:
    case CK_NullToMemberPointer:
    case CK_UserDefinedConversion:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_MemberPointerToBoolean:
    case CK_VectorSplat:
    case CK_FloatingRealToComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToIntegralComplex:
    case CK_IntegralRealToComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToFloatingComplex:
    case CK_PointerToIntegral:
    case CK_PointerToBoolean:
    case CK_NullToPointer:
    case CK_IntegralCast:
    case CK_IntegralToPointer:
    case CK_IntegralToBoolean:
    case CK_IntegralToFloating:
    case CK_FloatingToIntegral:
    case CK_FloatingToBoolean:
    case CK_FloatingCast:
    case CK_ZeroToOCLEvent:
      return 0;
    }
    llvm_unreachable("Invalid CastKind");
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

  llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    // No need for a DefaultInitExprScope: we don't handle 'this' in a
    // constant expression.
    return Visit(DIE->getExpr());
  }

  llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
    return Visit(E->GetTemporaryExpr());
  }

  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    if (ILE->isStringLiteralInit())
      return Visit(ILE->getInit(0));

    llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    llvm::Type *ElemTy = AType->getElementType();
    unsigned NumInitElements = ILE->getNumInits();
    unsigned NumElements = AType->getNumElements();

    // Initializing an array requires us to automatically
    // initialize any elements that have not been initialized explicitly.
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    std::vector<llvm::Constant*> Elts;
    Elts.reserve(NumInitableElts + NumElements);

    bool RewriteType = false;
    for (unsigned i = 0; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    llvm::Constant *fillC;
    if (Expr *filler = ILE->getArrayFiller())
      fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
    else
      fillC = llvm::Constant::getNullValue(ElemTy);
    if (!fillC)
      return 0;
    RewriteType |= (fillC->getType() != ElemTy);
    Elts.resize(NumElements, fillC);

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<llvm::Type*> Types;
      Types.reserve(NumInitableElts + NumElements);
      for (unsigned i = 0, e = Elts.size(); i < e; ++i)
        Types.push_back(Elts[i]->getType());
      llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                      Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isRecordType())
      return EmitRecordInitialization(ILE);

    return 0;
  }

  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (!E->getConstructor()->isTrivial())
      return 0;

    QualType Ty = E->getType();

    // FIXME: We should not have to call getBaseElementType here.
    const RecordType *RT =
      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // If the class doesn't have a trivial destructor, we can't emit it as a
    // constant expr.
    if (!RD->hasTrivialDestructor())
      return 0;

    // Only copy and default constructors can be trivial.

    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
             "trivial ctor has argument but isn't a copy/move ctor");

      Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      return Visit(Arg);
    }

    return CGM.EmitNullConstant(Ty);
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    return CGM.GetConstantArrayFromStringLiteral(E);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string; emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantDataArray::getString(VMContext, Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
  llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
    if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
      if (Decl->hasAttr<WeakRefAttr>())
        return CGM.GetWeakRefReference(Decl);
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isLocalVarDecl())
            return CGM.getStaticLocalDeclAddress(VD);
        }
      }
      return 0;
    }

    Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
                                               CLE->getType(), CGF);
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", 0,
                                     llvm::GlobalVariable::NotThreadLocal,
                          CGM.getContext().getTargetAddressSpace(E->getType()));
      return C;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C =
          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
      if (CGF) {
        LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
        return cast<llvm::Constant>(Res.getAddress());
      } else if (Type == PredefinedExpr::PrettyFunction) {
        return CGM.GetAddrOfConstantCString("top level", ".tmp");
      }

      return CGM.GetAddrOfConstantCString("", ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      llvm::Constant *Ptr =
        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      unsigned builtin = CE->isBuiltinCall();
      if (builtin !=
            Builtin::BI__builtin___CFStringMakeConstantString &&
          builtin !=
            Builtin::BI__builtin___NSStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      if (builtin ==
            Builtin::BI__builtin___NSStringMakeConstantString) {
        return CGM.getObjCRuntime().GenerateConstantString(Literal);
      }
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    case Expr::CXXTypeidExprClass: {
      CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
      QualType T;
      if (Typeid->isTypeOperand())
        T = Typeid->getTypeOperand(CGM.getContext());
      else
        T = Typeid->getExprOperand()->getType();
      return CGM.GetAddrOfRTTIDescriptor(T);
    }
    case Expr::CXXUuidofExprClass: {
      return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
    }
    case Expr::MaterializeTemporaryExprClass: {
      MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
      assert(MTE->getStorageDuration() == SD_Static);
      SmallVector<const Expr *, 2> CommaLHSs;
      SmallVector<SubobjectAdjustment, 2> Adjustments;
      const Expr *Inner = MTE->GetTemporaryExpr()
          ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
      return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

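// Try to emit the initializer of the given variable as a constant; returns
// null if the initializer cannot be emitted as a constant expression.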
llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
                                                CodeGenFunction *CGF) {
  // Quickly check whether the variable can simply be default NULL-initialized;
  // this avoids going through the rest of the code, which may (for C++11)
  // initialize the memory to all NULLs.
  if (!D.hasLocalStorage()) {
    QualType Ty = D.getType();
    if (Ty->isArrayType())
      Ty = Context.getBaseElementType(Ty);
    if (Ty->isRecordType())
      if (const CXXConstructExpr *E =
          dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
        const CXXConstructorDecl *CD = E->getConstructor();
        if (CD->isTrivial() && CD->isDefaultConstructor())
          return EmitNullConstant(D.getType());
      }
  }

  if (const APValue *Value = D.evaluateValue())
    return EmitConstantValueForMemory(*Value, D.getType(), CGF);

  // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
  // reference is a constant expression, and the reference binds to a temporary,
  // then constant initialization is performed. ConstExprEmitter will
  // incorrectly emit a prvalue constant in this case, and the calling code
  // interprets that as the (pointer) value of the reference, rather than the
  // desired value of the referee.
  if (D.getType()->isReferenceType())
    return 0;

  const Expr *E = D.getInit();
  assert(E && "No initializer to emit");

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

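// Try to emit an arbitrary expression as a constant, first by evaluating it
// with the AST-level constant evaluator and, failing that, by folding it with
// ConstExprEmitter. Returns null on failure.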
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->EvaluateAsRValue(Result, Context);

  llvm::Constant *C = 0;
  if (Success && !Result.HasSideEffects)
    C = EmitConstantValue(Result.Val, DestType, CGF);
  else
    C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));

  if (C && C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

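// Emit an already-evaluated APValue as an LLVM constant of the given
// destination type.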
llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
                                                 QualType DestType,
                                                 CodeGenFunction *CGF) {
  switch (Value.getKind()) {
  case APValue::Uninitialized:
    llvm_unreachable("Constant expressions should be initialized.");
  case APValue::LValue: {
    llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
    llvm::Constant *Offset =
      llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());

    llvm::Constant *C;
    if (APValue::LValueBase LVBase = Value.getLValueBase()) {
      // An array can be represented as an lvalue referring to the base.
      if (isa<llvm::ArrayType>(DestTy)) {
        assert(Offset->isNullValue() && "offset on array initializer");
        return ConstExprEmitter(*this, CGF).Visit(
          const_cast<Expr*>(LVBase.get<const Expr*>()));
      }

      C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);

      // Apply offset if necessary.
      if (!Offset->isNullValue()) {
        llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
        Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
        C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
      }

      // Convert to the appropriate type; this could be an lvalue for
      // an integer.
      if (isa<llvm::PointerType>(DestTy))
        return llvm::ConstantExpr::getBitCast(C, DestTy);

      return llvm::ConstantExpr::getPtrToInt(C, DestTy);
    } else {
      C = Offset;

      // Convert to the appropriate type; this could be an lvalue for
      // an integer.
      if (isa<llvm::PointerType>(DestTy))
        return llvm::ConstantExpr::getIntToPtr(C, DestTy);

      // If the types don't match, this should only be a truncate.
      if (C->getType() != DestTy)
        return llvm::ConstantExpr::getTrunc(C, DestTy);

      return C;
    }
  }
  case APValue::Int:
    return llvm::ConstantInt::get(VMContext, Value.getInt());
  case APValue::ComplexInt: {
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantInt::get(VMContext,
                                        Value.getComplexIntReal());
    Complex[1] = llvm::ConstantInt::get(VMContext,
                                        Value.getComplexIntImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
                                                  Complex[1]->getType(),
                                                  NULL);
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Float: {
    const llvm::APFloat &Init = Value.getFloat();
    if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
         !Context.getLangOpts().NativeHalfType)
      return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
    else
      return llvm::ConstantFP::get(VMContext, Init);
  }
  case APValue::ComplexFloat: {
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantFP::get(VMContext,
                                       Value.getComplexFloatReal());
    Complex[1] = llvm::ConstantFP::get(VMContext,
                                       Value.getComplexFloatImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
                                                  Complex[1]->getType(),
                                                  NULL);
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Vector: {
    SmallVector<llvm::Constant *, 4> Inits;
    unsigned NumElts = Value.getVectorLength();

    for (unsigned i = 0; i != NumElts; ++i) {
      const APValue &Elt = Value.getVectorElt(i);
      if (Elt.isInt())
        Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
      else
        Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
    }
    return llvm::ConstantVector::get(Inits);
  }
  case APValue::AddrLabelDiff: {
    const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
    const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
    llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
    llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);

    // Compute the difference.
    llvm::Type *ResultType = getTypes().ConvertType(DestType);
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
    llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);

    // LLVM is a bit sensitive about the exact format of the
    // address-of-label difference; make sure to truncate after
    // the subtraction.
    return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
  }
  case APValue::Struct:
  case APValue::Union:
    return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
  case APValue::Array: {
    const ArrayType *CAT = Context.getAsArrayType(DestType);
    unsigned NumElements = Value.getArraySize();
    unsigned NumInitElts = Value.getArrayInitializedElts();

    std::vector<llvm::Constant*> Elts;
    Elts.reserve(NumElements);

    // Emit array filler, if there is one.
    llvm::Constant *Filler = 0;
    if (Value.hasArrayFiller())
      Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
                                          CAT->getElementType(), CGF);

    // Emit initializer elements.
    llvm::Type *CommonElementType = 0;
    for (unsigned I = 0; I < NumElements; ++I) {
      llvm::Constant *C = Filler;
      if (I < NumInitElts)
        C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
                                       CAT->getElementType(), CGF);
      else
        assert(Filler && "Missing filler for implicit elements of initializer");
      if (I == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = 0;
      Elts.push_back(C);
    }

    if (!CommonElementType) {
      // FIXME: Try to avoid packing the array
      std::vector<llvm::Type*> Types;
      Types.reserve(NumElements);
      for (unsigned i = 0, e = Elts.size(); i < e; ++i)
        Types.push_back(Elts[i]->getType());
      llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    llvm::ArrayType *AType =
      llvm::ArrayType::get(CommonElementType, NumElements);
    return llvm::ConstantArray::get(AType, Elts);
  }
  case APValue::MemberPointer:
    return getCXXABI().EmitMemberPointer(Value, DestType);
  }
  llvm_unreachable("Unknown APValue kind");
}

llvm::Constant *
CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
                                          QualType DestType,
                                          CodeGenFunction *CGF) {
  llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
  if (C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

llvm::Constant *
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
  assert(E->isFileScope() && "not a file-scope compound literal expr");
  return ConstExprEmitter(*this, 0).EmitLValue(E);
}

llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form.
  const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
  const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();

  // A member function pointer.
  if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
    return getCXXABI().EmitMemberPointer(method);

  // Otherwise, a member data pointer.
  uint64_t fieldOffset = getContext().getFieldOffset(decl);
  CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
  return getCXXABI().EmitMemberDataPointer(type, chars);
}

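/// Fill in the given array of i8 constants with the representation of a null
/// pointer to data member (all-ones bytes, per the Itanium-style encoding
/// hard-coded below) for every such member found in T, starting at the given
/// bit offset.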
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
                             SmallVectorImpl<llvm::Constant *> &Elements,
                             uint64_t StartOffset) {
  assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
         "StartOffset not byte aligned!");

  if (CGM.getTypes().isZeroInitializable(T))
    return;

  if (const ConstantArrayType *CAT =
        CGM.getContext().getAsConstantArrayType(T)) {
    QualType ElementTy = CAT->getElementType();
    uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);

    for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
      FillInNullDataMemberPointers(CGM, ElementTy, Elements,
                                   StartOffset + I * ElementSize);
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    // Go through all bases and fill in any null pointer to data members.
    for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
      if (I->isVirtual()) {
        // Ignore virtual bases.
        continue;
      }

      const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (BaseDecl->isEmpty())
        continue;

      // Ignore bases that don't have any pointer to data members.
      if (CGM.getTypes().isZeroInitializable(BaseDecl))
        continue;

      uint64_t BaseOffset =
        CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
      FillInNullDataMemberPointers(CGM, I->getType(),
                                   Elements, StartOffset + BaseOffset);
    }

    // Visit all fields.
    unsigned FieldNo = 0;
    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I, ++FieldNo) {
      QualType FieldType = I->getType();

      if (CGM.getTypes().isZeroInitializable(FieldType))
        continue;

      uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
      FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
    }
  } else {
    assert(T->isMemberPointerType() && "Should only see member pointers here!");
    assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
           "Should only see pointers to data members here!");

    CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
    CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);

    // FIXME: hardcodes Itanium member pointer representation!
    llvm::Constant *NegativeOne =
      llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);

    // Fill in the null data member pointer.
    for (CharUnits I = StartIndex; I != EndIndex; ++I)
      Elements[I.getQuantity()] = NegativeOne;
  }
}

static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base);

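/// Emit the "null" constant for a C++ record: a null constant for each base
/// and field, which may not be all-zero if the record contains pointers to
/// data members.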
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const CXXRecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  llvm::StructType *structure =
    (asCompleteObject ? layout.getLLVMType()
                      : layout.getBaseSubobjectLLVMType());

  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  // Fill in all the bases.
  for (CXXRecordDecl::base_class_const_iterator
         I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
    if (I->isVirtual()) {
      // Ignore virtual bases; if we're laying out for a complete
      // object, we'll lay these out later.
      continue;
    }

    const CXXRecordDecl *base =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    // Ignore empty bases.
    if (base->isEmpty())
      continue;

    unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
    llvm::Type *baseType = structure->getElementType(fieldIndex);
    elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
  }

  // Fill in all the fields.
  for (RecordDecl::field_iterator I = record->field_begin(),
         E = record->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;

    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!field->isBitField()) {
      unsigned fieldIndex = layout.getLLVMFieldNo(field);
      elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
    }

    // For unions, stop after the first named field.
    if (record->isUnion() && field->getDeclName())
      break;
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (asCompleteObject) {
    for (CXXRecordDecl::base_class_const_iterator
           I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
      const CXXRecordDecl *base =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (base->isEmpty())
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
  }

  return llvm::ConstantStruct::get(structure, elements);
}

/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base) {
  const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);

  // Just zero out bases that don't have any pointer to data members.
  if (baseLayout.isZeroInitializableAsBase())
    return llvm::Constant::getNullValue(baseType);

  // If the base type is a struct, we can just use its null constant.
  if (isa<llvm::StructType>(baseType)) {
    return EmitNullConstant(CGM, base, /*complete*/ false);
  }

  // Otherwise, some bases are represented as arrays of i8 if the size
  // of the base is smaller than its corresponding LLVM type.  Figure
  // out how many elements this base array has.
  llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
  unsigned numBaseElements = baseArrayType->getNumElements();

  // Fill in null data member pointers.
  SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
  FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
                               baseElements, 0);

  // Now go through all other elements and zero them out.
  if (numBaseElements) {
    llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
    for (unsigned i = 0; i != numBaseElements; ++i) {
      if (!baseElements[i])
        baseElements[i] = i8_zero;
    }
  }

  return llvm::ConstantArray::get(baseArrayType, baseElements);
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
    llvm::ArrayType *ATy =
      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element = EmitNullConstant(ElementTy);
    unsigned NumElements = CAT->getSize().getZExtValue();

    if (Element->isNullValue())
      return llvm::ConstantAggregateZero::get(ATy);

    SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return ::EmitNullConstant(*this, RD, /*complete object*/ true);
  }

  assert(T->isMemberPointerType() && "Should only see member pointers here!");
  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
         "Should only see pointers to data members here!");

  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}

llvm::Constant *
CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
  return ::EmitNullConstant(*this, Record, false);
}