//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  std::vector<const llvm::Type *> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  const llvm::StructType *BaseSubobjectType;

  /// Fields - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Lays out a field in a union and returns the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Lays out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Tries to lay out all the fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Lays out a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - Lays out a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - Lays out the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - Lays out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - Lays out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Computes the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - Lays out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lays out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that a field can be laid
  /// out at the given offset with the given alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Appends a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Appends enough tail padding so that the type will
  /// have the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  CharUnits getTypeAlignment(const llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Checks if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Lays out a RecordDecl.
  void Layout(const RecordDecl *D);
};

} // end anonymous namespace

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
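  // For example (illustrative case), under #pragma pack(1) a struct
  // { char c; int i; } has i at byte offset 1, below the natural alignment of
  // i32, so the first LayoutFields pass fails and is rerun with Packed set.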
  Packed = true;
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions, so
  // reverse the bit offset here. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }
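  // For instance, an 8-bit field at bit offset 0 of a 32-bit container ends
  // up at bit offset 32 - 0 - 8 == 24 on a big-endian target.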

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
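  // As a worked illustration (hypothetical record): a 6-bit field of 32-bit
  // int type at bit offset 30 of a 64-bit record yields two components: an
  // i32-wide access at bit 0 covering field bits [30, 32), then an i32-wide
  // access at bit 32 covering field bits [32, 36).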
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above because the
    // first fields are in higher bits. But that also reverses the bytes, so
    // fix it here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = (ContainingTypeSizeInBits - AccessStart - AccessWidth) / 8;
    } else {
      AI.FieldByteOffset = AccessStart / 8;
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  unsigned numBytesToAppend;

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
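    // For example (hypothetical values), with 3 bits still available in the
    // last appended byte, a 10-bit field needs
    // RoundUpToAlignment(10 - 3, 8) / 8 == 1 additional byte.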
    numBytesToAppend =
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(fieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(CharUnits::fromQuantity(fieldOffset / 8), CharUnits::One());

    numBytesToAppend = llvm::RoundUpToAlignment(fieldSize, 8) / 8;

    assert(numBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(CharUnits::fromQuantity(numBytesToAppend));

  BitsAvailableInLastField =
    NextFieldOffset.getQuantity() * 8 - (fieldOffset + fieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

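  // A record field whose type carries #pragma pack (MaxFieldAlignmentAttr)
  // may have an AST alignment that differs from the LLVM ABI alignment of its
  // converted type; if the two disagree, we can only model the layout with a
  // packed struct.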
  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero-sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                         CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;

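  // Pick the union's storage field: the member with the strictest alignment
  // wins, with size as the tie-breaker. For example (on a typical target), in
  // union { char c; int i; double d; } the double member supplies both the
  // LLVM type and the size.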
  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  // Fields and bases can be laid out in the tail padding of previous
  // bases.  If this happens, we need to allocate the base as an i8
  // array; otherwise, we can use the subobject type.  However,
  // actually doing that would require knowledge of what immediately
  // follows this base in the layout, so instead we do a conservative
  // approximation, which is to use the base subobject type if it
  // has the same LLVM storage size as the nvsize.
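  //
  // For instance (illustrative, Itanium ABI):
  //   struct A { int i; char c; A(); };  // sizeof(A) == 8, nvsize == 5
  //   struct B : A { char d; };          // d lands in A's tail padding, at 5
  // Emitting the A base of B as %struct.A (8 bytes) would overlap B::d, so we
  // emit nvsize bytes of i8 instead.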

  // The nvsize, i.e. the unpadded size of the base class.
  CharUnits nvsize = baseASTLayout.getNonVirtualSize();

#if 0
  const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  const llvm::StructLayout *baseLLVMLayout =
    Types.getTargetData().getStructLayout(subobjectType);
  CharUnits stsize = CharUnits::fromQuantity(baseLLVMLayout->getSizeInBytes());

  if (nvsize == stsize)
    AppendField(baseOffset, subobjectType);
  else
#endif
    AppendBytes(nvsize);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  LayoutBase(base, baseLayout, baseOffset);
  NonVirtualBases[base] = (FieldTypes.size() - 1);
}

void
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  LayoutBase(base, baseLayout, baseOffset);
  VirtualBases[base] = (FieldTypes.size() - 1);
}

/// LayoutVirtualBases - Lays out the virtual bases of a record decl.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
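    // (An indirect primary base is allocated as part of the base it is
    // primary for; laying it out here as well would emit it twice.)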
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      LayoutVirtualBase(BaseDecl, vbaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, Layout);
  }
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();
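      // The field appended below has type i32 (...)**, matching the vtable
      // pointer in the %struct.C example at the top of this file.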

      assert(NextFieldOffset.isZero() &&
             "VTable pointer must come first!");
      AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
    } else {
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
      else
        LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
    }
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
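  // (If so, BaseSubobjectType is left null and ComputeRecordLayout falls back
  // to the complete-object type, so no separate *.base type is emitted.)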
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(),
                                            FieldTypes, Packed);

  if (needsPadding) {
    // Pull the padding back off.
    FieldTypes.pop_back();
  }

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
  AppendTailPadding(Types.getContext().toBits(Layout.getSize()));

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  CharUnits RecordSizeInBytes =
    Types.getContext().toCharUnitsFromBits(RecordSize);
  assert(NextFieldOffset <= RecordSizeInBytes && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSizeInBytes - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        const llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");
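  // Worked example (hypothetical values): with NextFieldOffset == 5,
  // fieldAlignment == 4 and fieldOffset == 12, the aligned next offset is 8,
  // still short of 12, so 12 - 5 == 7 bytes of padding get appended.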

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(fieldAlignment);

  if (alignedNextFieldOffset < fieldOffset) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    CharUnits padding = fieldOffset - NextFieldOffset;

    AppendBytes(padding);
  }
}

const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
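/// For example (Itanium C++ ABI), a null pointer to data member is
/// represented as -1 rather than 0, so a record containing a field like
/// int S::*p cannot be zero-initialized with an LLVM zeroinitializer.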
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
                                                     Builder.FieldTypes,
                                                     Builder.Packed);

  // If we're in C++, compute the base subobject type.
  const llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(CharUnits::fromQuantity(AI.FieldByteOffset));
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
