//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                            ConstStructBuilder
//===----------------------------------------------------------------------===//

namespace {
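// ConstStructBuilder incrementally builds the llvm::Constant for a struct or
// union initializer.  Fields (and explicit undef padding) are appended at
// increasing byte offsets; if a field's natural alignment would overshoot its
// AST offset, the whole layout is converted to a packed LLVM struct.
//
// Rough illustration (hypothetical source, typical target with 32-bit int):
//   struct S { int a; char b; };
//   struct S s = { 1, 'x' };
// would finalize to something like { i32 1, i8 120, [3 x i8] undef }, where
// the trailing array is tail padding up to the record size.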
class ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;
  CharUnits NextFieldOffsetInChars;
  CharUnits LLVMStructAlignment;
  SmallVector<llvm::Constant *, 32> Elements;
public:
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     InitListExpr *ILE);
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     const APValue &Value, QualType ValTy);

private:
  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false),
    NextFieldOffsetInChars(CharUnits::Zero()),
    LLVMStructAlignment(CharUnits::One()) { }

  void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr);

  void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);

  void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::ConstantInt *InitExpr);

  void AppendPadding(CharUnits PadSize);

  void AppendTailPadding(CharUnits RecordSize);

  void ConvertStructToPacked();

  bool Build(InitListExpr *ILE);
  void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
             const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
  llvm::Constant *Finalize(QualType Ty);

  CharUnits getAlignment(const llvm::Constant *C) const {
    if (Packed)  return CharUnits::One();
    return CharUnits::fromQuantity(
        CGM.getDataLayout().getABITypeAlignment(C->getType()));
  }

  CharUnits getSizeInChars(const llvm::Constant *C) const {
    return CharUnits::fromQuantity(
        CGM.getDataLayout().getTypeAllocSize(C->getType()));
  }
};

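// Appends the constant for a non-bitfield member.  FieldOffset is the field's
// offset in bits from the AST record layout; it is converted to CharUnits and
// forwarded to AppendBytes.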
void ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
            llvm::Constant *InitCst) {
  const ASTContext &Context = CGM.getContext();

  CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);

  AppendBytes(FieldOffsetInChars, InitCst);
}

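// Appends InitCst so that it starts at FieldOffsetInChars.  If the constant's
// natural alignment would push it past that offset, the struct is converted
// to a packed layout; if it falls short, undef padding bytes are added first.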
void ConstStructBuilder::
AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {

  assert(NextFieldOffsetInChars <= FieldOffsetInChars
         && "Field offset mismatch!");

  CharUnits FieldAlignment = getAlignment(InitCst);

  // Round up the field offset to the alignment of the field type.
  CharUnits AlignedNextFieldOffsetInChars =
    NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);

  if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
    assert(!Packed && "Alignment is wrong even with a packed struct!");

    // Convert the struct to a packed struct.
    ConvertStructToPacked();

    AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
  }

  if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
    // We need to append padding.
    AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);

    assert(NextFieldOffsetInChars == FieldOffsetInChars &&
           "Did not add enough padding!");

    AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
  }

  // Add the field.
  Elements.push_back(InitCst);
  NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
                           getSizeInChars(InitCst);

  if (Packed)
    assert(LLVMStructAlignment == CharUnits::One() &&
           "Packed struct not byte-aligned!");
  else
    LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
}

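// Appends the value of a bitfield member.  Bits that land in the partially
// filled previous byte are merged into it (splitting any trailing undef
// padding array if necessary); the remaining bits are emitted one i8 at a
// time in an order that matches the target's endianness.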
void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset,
                                        llvm::ConstantInt *CI) {
  const ASTContext &Context = CGM.getContext();
  const uint64_t CharWidth = Context.getCharWidth();
  uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
  if (FieldOffset > NextFieldOffsetInBits) {
    // We need to add padding.
    CharUnits PadSize = Context.toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
                               Context.getTargetInfo().getCharAlign()));

    AppendPadding(PadSize);
  }

  uint64_t FieldSize = Field->getBitWidthValue(Context);

  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (FieldSize > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(FieldSize);

  // Truncate the size of FieldValue to the bit field size.
  if (FieldSize < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(FieldSize);

  NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
  if (FieldOffset < NextFieldOffsetInBits) {
    // Either part of the field or the entire field can go into the previous
    // byte.
    assert(!Elements.empty() && "Elements can't be empty!");

    unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;

    bool FitsCompletelyInPreviousByte =
      BitsInPreviousByte >= FieldValue.getBitWidth();

    llvm::APInt Tmp = FieldValue;

    if (!FitsCompletelyInPreviousByte) {
      unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;

      if (CGM.getDataLayout().isBigEndian()) {
        Tmp = Tmp.lshr(NewFieldWidth);
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining high bits.
        FieldValue = FieldValue.trunc(NewFieldWidth);
      } else {
        Tmp = Tmp.trunc(BitsInPreviousByte);

        // We want the remaining low bits.
        FieldValue = FieldValue.lshr(BitsInPreviousByte);
        FieldValue = FieldValue.trunc(NewFieldWidth);
      }
    }

    Tmp = Tmp.zext(CharWidth);
    if (CGM.getDataLayout().isBigEndian()) {
      if (FitsCompletelyInPreviousByte)
        Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
    } else {
      Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
    }

    // 'or' in the bits that go into the previous byte.
    llvm::Value *LastElt = Elements.back();
    if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
      Tmp |= Val->getValue();
    else {
      assert(isa<llvm::UndefValue>(LastElt));
      // If there is an undef field that we're adding to, it can either be a
      // scalar undef (in which case, we just replace it with our field) or it
      // is an array.  If it is an array, we have to pull one byte off the
      // array so that the other undef bytes stay around.
      if (!isa<llvm::IntegerType>(LastElt->getType())) {
        // The undef padding will be a multibyte array; create a new, smaller
        // padding and then a hole for our i8 to get plopped into.
        assert(isa<llvm::ArrayType>(LastElt->getType()) &&
               "Expected array padding of undefs");
        llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
        assert(AT->getElementType()->isIntegerTy(CharWidth) &&
               AT->getNumElements() != 0 &&
               "Expected non-empty array padding of undefs");

        // Remove the padding array.
        NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
        Elements.pop_back();

        // Add the padding back in two chunks.
        AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
        AppendPadding(CharUnits::One());
        assert(isa<llvm::UndefValue>(Elements.back()) &&
               Elements.back()->getType()->isIntegerTy(CharWidth) &&
               "Padding addition didn't work right");
      }
    }

    Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);

    if (FitsCompletelyInPreviousByte)
      return;
  }

  while (FieldValue.getBitWidth() > CharWidth) {
    llvm::APInt Tmp;

    if (CGM.getDataLayout().isBigEndian()) {
      // We want the high bits.
      Tmp =
        FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
    } else {
      // We want the low bits.
      Tmp = FieldValue.trunc(CharWidth);

      FieldValue = FieldValue.lshr(CharWidth);
    }

    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
    ++NextFieldOffsetInChars;

    FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
  }

  assert(FieldValue.getBitWidth() > 0 &&
         "Should have at least one bit left!");
  assert(FieldValue.getBitWidth() <= CharWidth &&
         "Should not have more than a byte left!");

  if (FieldValue.getBitWidth() < CharWidth) {
    if (CGM.getDataLayout().isBigEndian()) {
      unsigned BitWidth = FieldValue.getBitWidth();

      FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
    } else
      FieldValue = FieldValue.zext(CharWidth);
  }

  // Append the last element.
  Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
                                            FieldValue));
  ++NextFieldOffsetInChars;
}

void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
  if (PadSize.isZero())
    return;

  llvm::Type *Ty = CGM.Int8Ty;
  if (PadSize > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());

  llvm::Constant *C = llvm::UndefValue::get(Ty);
  Elements.push_back(C);
  assert(getAlignment(C) == CharUnits::One() &&
         "Padding must have 1 byte alignment!");

  NextFieldOffsetInChars += getSizeInChars(C);
}

void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
  assert(NextFieldOffsetInChars <= RecordSize &&
         "Size mismatch!");

  AppendPadding(RecordSize - NextFieldOffsetInChars);
}

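// Rewrites the elements gathered so far as a packed (byte-aligned) layout,
// inserting explicit undef padding wherever the original layout relied on the
// elements' natural ABI alignment.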
void ConstStructBuilder::ConvertStructToPacked() {
  SmallVector<llvm::Constant *, 16> PackedElements;
  CharUnits ElementOffsetInChars = CharUnits::Zero();

  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    llvm::Constant *C = Elements[i];

    CharUnits ElementAlign = CharUnits::fromQuantity(
      CGM.getDataLayout().getABITypeAlignment(C->getType()));
    CharUnits AlignedElementOffsetInChars =
      ElementOffsetInChars.RoundUpToAlignment(ElementAlign);

    if (AlignedElementOffsetInChars > ElementOffsetInChars) {
      // We need some padding.
      CharUnits NumChars =
        AlignedElementOffsetInChars - ElementOffsetInChars;

      llvm::Type *Ty = CGM.Int8Ty;
      if (NumChars > CharUnits::One())
        Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());

      llvm::Constant *Padding = llvm::UndefValue::get(Ty);
      PackedElements.push_back(Padding);
      ElementOffsetInChars += getSizeInChars(Padding);
    }

    PackedElements.push_back(C);
    ElementOffsetInChars += getSizeInChars(C);
  }

  assert(ElementOffsetInChars == NextFieldOffsetInChars &&
         "Packing the struct changed its size!");

  Elements.swap(PackedElements);
  LLVMStructAlignment = CharUnits::One();
  Packed = true;
}

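// Builds the struct from an InitListExpr, emitting each initialized field and
// an explicit null constant for any trailing field without an initializer.
// Returns false if some field initializer cannot be emitted as a constant.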
bool ConstStructBuilder::Build(InitListExpr *ILE) {
  RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  unsigned FieldNo = 0;
  unsigned ElementNo = 0;

  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isUnnamedBitfield())
      continue;

    // Get the initializer.  A struct can include fields without initializers,
    // we just use explicit null values for them.
    llvm::Constant *EltInit;
    if (ElementNo < ILE->getNumInits())
      EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
                                     Field->getType(), CGF);
    else
      EltInit = CGM.EmitNullConstant(Field->getType());

    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
                     cast<llvm::ConstantInt>(EltInit));
    }
  }

  return true;
}

namespace {
struct BaseInfo {
  BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
    : Decl(Decl), Offset(Offset), Index(Index) {
  }

  const CXXRecordDecl *Decl;
  CharUnits Offset;
  unsigned Index;

  bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
};
}

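// Builds the struct from an already-evaluated APValue.  For C++ classes this
// also emits the vtable pointer (when one is needed and this is not the
// primary base) and recurses into the non-virtual bases in address order
// before emitting the fields.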
void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
                               bool IsPrimaryBase,
                               const CXXRecordDecl *VTableClass,
                               CharUnits Offset) {
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
    // Add a vtable pointer, if we need one and it hasn't already been added.
    if (CD->isDynamicClass() && !IsPrimaryBase) {
      llvm::Constant *VTableAddressPoint =
          CGM.getCXXABI().getVTableAddressPointForConstExpr(
              BaseSubobject(CD, Offset), VTableClass);
      AppendBytes(Offset, VTableAddressPoint);
    }

    // Accumulate and sort bases, in order to visit them in address order,
    // which may not be the same as declaration order.
    SmallVector<BaseInfo, 8> Bases;
    Bases.reserve(CD->getNumBases());
    unsigned BaseNo = 0;
    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
         BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
      assert(!Base->isVirtual() && "should not have virtual bases here");
      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
      CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
      Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
    }
    std::stable_sort(Bases.begin(), Bases.end());

    for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
      BaseInfo &Base = Bases[I];

      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
      Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
            VTableClass, Offset + Base.Offset);
    }
  }

  unsigned FieldNo = 0;
  uint64_t OffsetBits = CGM.getContext().toBits(Offset);

  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && Val.getUnionField() != *Field)
      continue;

    // Don't emit anonymous bitfields, they just affect layout.
    if (Field->isUnnamedBitfield())
      continue;

    // Emit the value of the initializer.
    const APValue &FieldValue =
      RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
    llvm::Constant *EltInit =
      CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
    assert(EltInit && "EmitConstantValue can't fail");

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
    } else {
      // Otherwise we have a bitfield.
      AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
                     cast<llvm::ConstantInt>(EltInit));
    }
  }
}

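// Finishes the struct: appends tail padding up to the record size (unless a
// flexible array member made the constant larger than the record), packs the
// layout if the aligned size would otherwise exceed the record size, and
// returns the final llvm::ConstantStruct.  The ConvertType'd type is reused
// when it is layout-identical to what the builder produced.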
llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
  RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  CharUnits LayoutSizeInChars = Layout.getSize();

  if (NextFieldOffsetInChars > LayoutSizeInChars) {
    // If the struct is bigger than the size of the record type,
    // we must have a flexible array member at the end.
    assert(RD->hasFlexibleArrayMember() &&
           "Must have flexible array member if struct is bigger than type!");

    // No tail padding is necessary.
  } else {
    // Append tail padding if necessary.
    AppendTailPadding(LayoutSizeInChars);

    CharUnits LLVMSizeInChars =
      NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);

    // Check if we need to convert the struct to a packed struct.
    if (NextFieldOffsetInChars <= LayoutSizeInChars &&
        LLVMSizeInChars > LayoutSizeInChars) {
      assert(!Packed && "Size mismatch!");

      ConvertStructToPacked();
      assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
             "Converting to packed did not help!");
    }

    assert(LayoutSizeInChars == NextFieldOffsetInChars &&
           "Tail padding mismatch!");
  }

  // Pick the type to use.  If the type is layout identical to the ConvertType
  // type then use it, otherwise use whatever the builder produced for us.
  llvm::StructType *STy =
      llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
                                               Elements, Packed);
  llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
  if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
    if (ValSTy->isLayoutIdentical(STy))
      STy = ValSTy;
  }

  llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);

  assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
         getSizeInChars(Result) && "Size mismatch!");

  return Result;
}

llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                InitListExpr *ILE) {
  ConstStructBuilder Builder(CGM, CGF);

  if (!Builder.Build(ILE))
    return nullptr;

  return Builder.Finalize(ILE->getType());
}

llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
                                                CodeGenFunction *CGF,
                                                const APValue &Val,
                                                QualType ValTy) {
  ConstStructBuilder Builder(CGM, CGF);

  const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
  Builder.Build(Val, RD, false, CD, CharUnits::Zero());

  return Builder.Finalize(ValTy);
}


//===----------------------------------------------------------------------===//
//                             ConstExprEmitter
//===----------------------------------------------------------------------===//

/// This class only needs to handle two cases:
/// 1) Literals (this is used by APValue emission to emit literals).
/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
///    constant fold these types).
class ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return nullptr;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *
  VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
    return Visit(PE->getReplacement());
  }

  llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }

  llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
    return Visit(CE->getChosenSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    Expr *subExpr = E->getSubExpr();
    llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
    if (!C) return nullptr;

    llvm::Type *destType = ConvertType(E->getType());

    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size
      SmallVector<llvm::Constant*, 2> Elts;
      SmallVector<llvm::Type*, 2> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        llvm::Type *Ty = CGM.Int8Ty;
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }

    case CK_AddressSpaceConversion:
      return llvm::ConstantExpr::getAddrSpaceCast(C, destType);

    case CK_LValueToRValue:
    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_ConstructorConversion:
      return C;

    case CK_Dependent: llvm_unreachable("saw dependent cast!");

    case CK_BuiltinFnToFnPtr:
      llvm_unreachable("builtin functions are handled elsewhere");

    case CK_ReinterpretMemberPointer:
    case CK_DerivedToBaseMemberPointer:
    case CK_BaseToDerivedMemberPointer:
      return CGM.getCXXABI().EmitMemberPointerConversion(E, C);

    // These will never be supported.
    case CK_ObjCObjectLValueCast:
    case CK_ARCProduceObject:
    case CK_ARCConsumeObject:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCExtendBlockObject:
    case CK_CopyAndAutoreleaseBlockObject:
      return nullptr;

    // These don't need to be handled here because Evaluate knows how to
    // evaluate them in the cases where they can be folded.
    case CK_BitCast:
    case CK_ToVoid:
    case CK_Dynamic:
    case CK_LValueBitCast:
    case CK_NullToMemberPointer:
    case CK_UserDefinedConversion:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_MemberPointerToBoolean:
    case CK_VectorSplat:
    case CK_FloatingRealToComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToIntegralComplex:
    case CK_IntegralRealToComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToFloatingComplex:
    case CK_PointerToIntegral:
    case CK_PointerToBoolean:
    case CK_NullToPointer:
    case CK_IntegralCast:
    case CK_IntegralToPointer:
    case CK_IntegralToBoolean:
    case CK_IntegralToFloating:
    case CK_FloatingToIntegral:
    case CK_FloatingToBoolean:
    case CK_FloatingCast:
    case CK_ZeroToOCLEvent:
      return nullptr;
    }
    llvm_unreachable("Invalid CastKind");
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

  llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    // No need for a DefaultInitExprScope: we don't handle 'this' in a
    // constant expression.
    return Visit(DIE->getExpr());
  }

  llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
    return Visit(E->GetTemporaryExpr());
  }

  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    if (ILE->isStringLiteralInit())
      return Visit(ILE->getInit(0));

    llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    llvm::Type *ElemTy = AType->getElementType();
    unsigned NumInitElements = ILE->getNumInits();
    unsigned NumElements = AType->getNumElements();

    // Initializing an array requires us to automatically
    // initialize any elements that have not been initialized explicitly.
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    std::vector<llvm::Constant*> Elts;
    Elts.reserve(NumInitableElts + NumElements);

    bool RewriteType = false;
    for (unsigned i = 0; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return nullptr;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    llvm::Constant *fillC;
    if (Expr *filler = ILE->getArrayFiller())
      fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
    else
      fillC = llvm::Constant::getNullValue(ElemTy);
    if (!fillC)
      return nullptr;
    RewriteType |= (fillC->getType() != ElemTy);
    Elts.resize(NumElements, fillC);

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<llvm::Type*> Types;
      Types.reserve(NumInitableElts + NumElements);
      for (unsigned i = 0, e = Elts.size(); i < e; ++i)
        Types.push_back(Elts[i]->getType());
      llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                            Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isRecordType())
      return EmitRecordInitialization(ILE);

    return nullptr;
  }

  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (!E->getConstructor()->isTrivial())
      return nullptr;

    QualType Ty = E->getType();

    // FIXME: We should not have to call getBaseElementType here.
    const RecordType *RT =
      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // If the class doesn't have a trivial destructor, we can't emit it as a
    // constant expr.
    if (!RD->hasTrivialDestructor())
      return nullptr;

    // Only copy and default constructors can be trivial.

    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
             "trivial ctor has argument but isn't a copy/move ctor");

      Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      return Visit(Arg);
    }

    return CGM.EmitNullConstant(Ty);
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    return CGM.GetConstantArrayFromStringLiteral(E);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    QualType T = E->getType();
    if (T->getTypeClass() == Type::TypeOfExpr)
      T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
    const ConstantArrayType *CAT = cast<ConstantArrayType>(T);

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantDataArray::getString(VMContext, Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
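  // Emits the constant address for an lvalue base: a global variable or
  // function, a compound literal (materialized here as an internal global), a
  // string or Objective-C literal, a label address, and so on.  Returns
  // nullptr when no constant address can be produced.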
  llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
    if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
      if (Decl->hasAttr<WeakRefAttr>())
        return CGM.GetWeakRefReference(Decl);
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isLocalVarDecl())
            return CGM.getStaticLocalDeclAddress(VD);
        }
      }
      return nullptr;
    }

    Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
                                               CLE->getType(), CGF);
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", nullptr,
                                     llvm::GlobalVariable::NotThreadLocal,
                          CGM.getContext().getTargetAddressSpace(E->getType()));
      return C;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C =
          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
      if (CGF) {
        LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
        return cast<llvm::Constant>(Res.getAddress());
      } else if (Type == PredefinedExpr::PrettyFunction) {
        return CGM.GetAddrOfConstantCString("top level", ".tmp");
      }

      return CGM.GetAddrOfConstantCString("", ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      llvm::Constant *Ptr =
        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      unsigned builtin = CE->getBuiltinCallee();
      if (builtin !=
            Builtin::BI__builtin___CFStringMakeConstantString &&
          builtin !=
            Builtin::BI__builtin___NSStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      if (builtin ==
            Builtin::BI__builtin___NSStringMakeConstantString) {
        return CGM.getObjCRuntime().GenerateConstantString(Literal);
      }
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    case Expr::CXXTypeidExprClass: {
      CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
      QualType T;
      if (Typeid->isTypeOperand())
        T = Typeid->getTypeOperand(CGM.getContext());
      else
        T = Typeid->getExprOperand()->getType();
      return CGM.GetAddrOfRTTIDescriptor(T);
    }
    case Expr::CXXUuidofExprClass: {
      return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
    }
    case Expr::MaterializeTemporaryExprClass: {
      MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
      assert(MTE->getStorageDuration() == SD_Static);
      SmallVector<const Expr *, 2> CommaLHSs;
      SmallVector<SubobjectAdjustment, 2> Adjustments;
      const Expr *Inner = MTE->GetTemporaryExpr()
          ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
      return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
    }
    }

    return nullptr;
  }
};

}  // end anonymous namespace.

llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
                                                CodeGenFunction *CGF) {
  // Quickly check whether the variable can be default (NULL) initialized; if
  // so, avoid the rest of this code, which for C++11 may initialize the
  // memory to all NULLs.
  if (!D.hasLocalStorage()) {
    QualType Ty = D.getType();
    if (Ty->isArrayType())
      Ty = Context.getBaseElementType(Ty);
    if (Ty->isRecordType())
      if (const CXXConstructExpr *E =
          dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
        const CXXConstructorDecl *CD = E->getConstructor();
        if (CD->isTrivial() && CD->isDefaultConstructor())
          return EmitNullConstant(D.getType());
      }
  }

  if (const APValue *Value = D.evaluateValue())
    return EmitConstantValueForMemory(*Value, D.getType(), CGF);

  // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
  // reference is a constant expression, and the reference binds to a temporary,
  // then constant initialization is performed. ConstExprEmitter will
  // incorrectly emit a prvalue constant in this case, and the calling code
  // interprets that as the (pointer) value of the reference, rather than the
  // desired value of the referee.
  if (D.getType()->isReferenceType())
    return nullptr;

  const Expr *E = D.getInit();
  assert(E && "No initializer to emit");

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

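// Tries to constant-fold E (as an lvalue when DestType is a reference,
// otherwise as an rvalue) and emit the result.  If evaluation fails or has
// side effects, falls back to the syntactic ConstExprEmitter.  i1 results are
// zero-extended to their in-memory bool type.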
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->EvaluateAsRValue(Result, Context);

  llvm::Constant *C = nullptr;
  if (Success && !Result.HasSideEffects)
    C = EmitConstantValue(Result.Val, DestType, CGF);
  else
    C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));

  if (C && C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

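// Emits an llvm::Constant for an already-evaluated APValue of type DestType,
// handling every APValue kind: lvalues, integers, floats, complex values,
// vectors, arrays, structs/unions, member pointers, and address-of-label
// differences.  _Atomic destinations get explicit zero tail padding.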
llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
                                                 QualType DestType,
                                                 CodeGenFunction *CGF) {
  // For an _Atomic-qualified constant, we may need to add tail padding.
  if (auto *AT = DestType->getAs<AtomicType>()) {
    QualType InnerType = AT->getValueType();
    auto *Inner = EmitConstantValue(Value, InnerType, CGF);

    uint64_t InnerSize = Context.getTypeSize(InnerType);
    uint64_t OuterSize = Context.getTypeSize(DestType);
    if (InnerSize == OuterSize)
      return Inner;

    assert(InnerSize < OuterSize && "emitted over-large constant for atomic");
    llvm::Constant *Elts[] = {
      Inner,
      llvm::ConstantAggregateZero::get(
          llvm::ArrayType::get(Int8Ty, (OuterSize - InnerSize) / 8))
    };
    return llvm::ConstantStruct::getAnon(Elts);
  }

  switch (Value.getKind()) {
  case APValue::Uninitialized:
    llvm_unreachable("Constant expressions should be initialized.");
  case APValue::LValue: {
    llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
    llvm::Constant *Offset =
      llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());

    llvm::Constant *C;
    if (APValue::LValueBase LVBase = Value.getLValueBase()) {
      // An array can be represented as an lvalue referring to the base.
      if (isa<llvm::ArrayType>(DestTy)) {
        assert(Offset->isNullValue() && "offset on array initializer");
        return ConstExprEmitter(*this, CGF).Visit(
          const_cast<Expr*>(LVBase.get<const Expr*>()));
      }

      C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);

      // Apply offset if necessary.
      if (!Offset->isNullValue()) {
        unsigned AS = C->getType()->getPointerAddressSpace();
        llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS);
        llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy);
        Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
        C = llvm::ConstantExpr::getPointerCast(Casted, C->getType());
      }

      // Convert to the appropriate type; this could be an lvalue for
      // an integer.
      if (isa<llvm::PointerType>(DestTy))
        return llvm::ConstantExpr::getPointerCast(C, DestTy);

      return llvm::ConstantExpr::getPtrToInt(C, DestTy);
    } else {
      C = Offset;

      // Convert to the appropriate type; this could be an lvalue for
      // an integer.
      if (isa<llvm::PointerType>(DestTy))
        return llvm::ConstantExpr::getIntToPtr(C, DestTy);

      // If the types don't match this should only be a truncate.
      if (C->getType() != DestTy)
        return llvm::ConstantExpr::getTrunc(C, DestTy);

      return C;
    }
  }
  case APValue::Int:
    return llvm::ConstantInt::get(VMContext, Value.getInt());
  case APValue::ComplexInt: {
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantInt::get(VMContext,
                                        Value.getComplexIntReal());
    Complex[1] = llvm::ConstantInt::get(VMContext,
                                        Value.getComplexIntImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
                                                  Complex[1]->getType(),
                                                  NULL);
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Float: {
    const llvm::APFloat &Init = Value.getFloat();
    if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
         !Context.getLangOpts().NativeHalfType)
      return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
    else
      return llvm::ConstantFP::get(VMContext, Init);
  }
  case APValue::ComplexFloat: {
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantFP::get(VMContext,
                                       Value.getComplexFloatReal());
    Complex[1] = llvm::ConstantFP::get(VMContext,
                                       Value.getComplexFloatImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
                                                  Complex[1]->getType(),
                                                  NULL);
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Vector: {
    SmallVector<llvm::Constant *, 4> Inits;
    unsigned NumElts = Value.getVectorLength();

    for (unsigned i = 0; i != NumElts; ++i) {
      const APValue &Elt = Value.getVectorElt(i);
      if (Elt.isInt())
        Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
      else
        Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
    }
    return llvm::ConstantVector::get(Inits);
  }
  case APValue::AddrLabelDiff: {
    const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
    const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
    llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
    llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);

    // Compute difference
    llvm::Type *ResultType = getTypes().ConvertType(DestType);
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
    llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);

    // LLVM is a bit sensitive about the exact format of the
    // address-of-label difference; make sure to truncate after
    // the subtraction.
    return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
  }
  case APValue::Struct:
  case APValue::Union:
    return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
  case APValue::Array: {
    const ArrayType *CAT = Context.getAsArrayType(DestType);
    unsigned NumElements = Value.getArraySize();
    unsigned NumInitElts = Value.getArrayInitializedElts();

    std::vector<llvm::Constant*> Elts;
    Elts.reserve(NumElements);

    // Emit array filler, if there is one.
    llvm::Constant *Filler = nullptr;
    if (Value.hasArrayFiller())
      Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
                                          CAT->getElementType(), CGF);

    // Emit initializer elements.
    llvm::Type *CommonElementType = nullptr;
    for (unsigned I = 0; I < NumElements; ++I) {
      llvm::Constant *C = Filler;
      if (I < NumInitElts)
        C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
                                       CAT->getElementType(), CGF);
      else
        assert(Filler && "Missing filler for implicit elements of initializer");
      if (I == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(C);
    }

    if (!CommonElementType) {
      // FIXME: Try to avoid packing the array
      std::vector<llvm::Type*> Types;
      Types.reserve(NumElements);
      for (unsigned i = 0, e = Elts.size(); i < e; ++i)
        Types.push_back(Elts[i]->getType());
      llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    llvm::ArrayType *AType =
      llvm::ArrayType::get(CommonElementType, NumElements);
    return llvm::ConstantArray::get(AType, Elts);
  }
  case APValue::MemberPointer:
    return getCXXABI().EmitMemberPointer(Value, DestType);
  }
  llvm_unreachable("Unknown APValue kind");
}

llvm::Constant *
CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
                                          QualType DestType,
                                          CodeGenFunction *CGF) {
  llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
  if (C->getType()->isIntegerTy(1)) {
    llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}

llvm::Constant *
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
  assert(E->isFileScope() && "not a file-scope compound literal expr");
  return ConstExprEmitter(*this, nullptr).EmitLValue(E);
}

llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form.
  const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
  const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();

  // A member function pointer.
  if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
    return getCXXABI().EmitMemberPointer(method);

  // Otherwise, a member data pointer.
  uint64_t fieldOffset = getContext().getFieldOffset(decl);
  CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
  return getCXXABI().EmitMemberDataPointer(type, chars);
}

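// Recursively walks T and, for every pointer-to-data-member subobject,
// overwrites the corresponding byte slots in Elements with -1, the Itanium
// ABI's null representation for such pointers; everything else is left
// untouched.  StartOffset is in bits, while Elements is indexed by byte.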
static void
FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
                             SmallVectorImpl<llvm::Constant *> &Elements,
                             uint64_t StartOffset) {
  assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
         "StartOffset not byte aligned!");

  if (CGM.getTypes().isZeroInitializable(T))
    return;

  if (const ConstantArrayType *CAT =
        CGM.getContext().getAsConstantArrayType(T)) {
    QualType ElementTy = CAT->getElementType();
    uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);

    for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
      FillInNullDataMemberPointers(CGM, ElementTy, Elements,
                                   StartOffset + I * ElementSize);
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    // Go through all bases and fill in any null pointer to data members.
    for (const auto &I : RD->bases()) {
      if (I.isVirtual()) {
        // Ignore virtual bases.
        continue;
      }

      const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (BaseDecl->isEmpty())
        continue;

      // Ignore bases that don't have any pointer to data members.
      if (CGM.getTypes().isZeroInitializable(BaseDecl))
        continue;

      uint64_t BaseOffset =
        CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
      FillInNullDataMemberPointers(CGM, I.getType(),
                                   Elements, StartOffset + BaseOffset);
    }

    // Visit all fields.
    unsigned FieldNo = 0;
    for (RecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I, ++FieldNo) {
      QualType FieldType = I->getType();

      if (CGM.getTypes().isZeroInitializable(FieldType))
        continue;

      uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
      FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
    }
  } else {
    assert(T->isMemberPointerType() && "Should only see member pointers here!");
    assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
           "Should only see pointers to data members here!");

    CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
    CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);

    // FIXME: hardcodes Itanium member pointer representation!
    llvm::Constant *NegativeOne =
      llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);

    // Fill in the null data member pointer.
    for (CharUnits I = StartIndex; I != EndIndex; ++I)
      Elements[I.getQuantity()] = NegativeOne;
  }
}

static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base);

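// Emits the null constant for a C++ record, either as a complete object or as
// a base subobject.  Non-virtual bases and fields are filled in first, then
// (for a complete object) the virtual bases; any element that still has no
// explicit initializer is zero-filled.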
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const CXXRecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  llvm::StructType *structure =
    (asCompleteObject ? layout.getLLVMType()
                      : layout.getBaseSubobjectLLVMType());

  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  // Fill in all the bases.
  for (const auto &I : record->bases()) {
    if (I.isVirtual()) {
      // Ignore virtual bases; if we're laying out for a complete
      // object, we'll lay these out later.
      continue;
    }

    const CXXRecordDecl *base =
      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

    // Ignore empty bases.
    if (base->isEmpty())
      continue;

    unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
    llvm::Type *baseType = structure->getElementType(fieldIndex);
    elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
  }

  // Fill in all the fields.
  for (const auto *Field : record->fields()) {
    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!Field->isBitField()) {
      unsigned fieldIndex = layout.getLLVMFieldNo(Field);
      elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
    }

    // For unions, stop after the first named field.
    if (record->isUnion() && Field->getDeclName())
      break;
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (asCompleteObject) {
    for (const auto &I : record->vbases()) {
      const CXXRecordDecl *base =
        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (base->isEmpty())
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
  }

  return llvm::ConstantStruct::get(structure, elements);
}

/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base) {
  const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);

  // Just zero out bases that don't have any pointer to data members.
  if (baseLayout.isZeroInitializableAsBase())
    return llvm::Constant::getNullValue(baseType);

  // If the base type is a struct, we can just use its null constant.
  if (isa<llvm::StructType>(baseType)) {
    return EmitNullConstant(CGM, base, /*complete*/ false);
  }

  // Otherwise, some bases are represented as arrays of i8 if the size
  // of the base is smaller than its corresponding LLVM type.  Figure
  // out how many elements this base array has.
  llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
  unsigned numBaseElements = baseArrayType->getNumElements();

  // Fill in null data member pointers.
  SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
  FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
                               baseElements, 0);

  // Now go through all other elements and zero them out.
  if (numBaseElements) {
    llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
    for (unsigned i = 0; i != numBaseElements; ++i) {
      if (!baseElements[i])
        baseElements[i] = i8_zero;
    }
  }

  return llvm::ConstantArray::get(baseArrayType, baseElements);
}

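// Emits the in-memory null representation for an arbitrary type T.  Types that
// are trivially zero-initializable get a plain zero constant; arrays and
// records containing pointers to data members are built element by element,
// and a lone pointer to data member becomes the ABI's null value (-1 under the
// Itanium C++ ABI).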
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
    llvm::ArrayType *ATy =
      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element = EmitNullConstant(ElementTy);
    unsigned NumElements = CAT->getSize().getZExtValue();

    if (Element->isNullValue())
      return llvm::ConstantAggregateZero::get(ATy);

    SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return ::EmitNullConstant(*this, RD, /*complete object*/ true);
  }

  assert(T->isMemberPointerType() && "Should only see member pointers here!");
  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
         "Should only see pointers to data members here!");

  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}

llvm::Constant *
CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
  return ::EmitNullConstant(*this, Record, false);
}