ItaniumCXXABI.cpp revision 360784
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides C++ code generation targeting the Itanium C++ ABI.  The class
// in this file generates structures that follow the Itanium C++ ABI, which is
// documented at:
//  http://www.codesourcery.com/public/cxx-abi/abi.html
//  http://www.codesourcery.com/public/cxx-abi/abi-eh.html
//
// It also supports the closely-related ARM ABI, documented at:
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"

using namespace clang;
using namespace CodeGen;

namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
  /// VTables - All the vtables which have been defined.
  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;

  /// All the thread wrapper functions that have been used.
  llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
      ThreadWrappers;

protected:
  bool UseARMMethodPtrABI;
  bool UseARMGuardVarABI;
  bool Use32BitVTableOffsetABI;

  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

public:
  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
    CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
    UseARMGuardVarABI(UseARMGuardVarABI),
    Use32BitVTableOffsetABI(false) { }

  bool classifyReturnType(CGFunctionInfo &FI) const override;

  RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
    // If C++ prohibits us from making a copy, pass by address.
    if (!RD->canPassInRegisters())
      return RAA_Indirect;
    return RAA_Default;
  }

  bool isThisCompleteObject(GlobalDecl GD) const override {
    // The Itanium ABI has separate complete-object vs.  base-object
    // variants of both constructors and destructors.
    if (isa<CXXDestructorDecl>(GD.getDecl())) {
      switch (GD.getDtorType()) {
      case Dtor_Complete:
      case Dtor_Deleting:
        return true;

      case Dtor_Base:
        return false;

      case Dtor_Comdat:
        llvm_unreachable("emitting dtor comdat as function?");
      }
      llvm_unreachable("bad dtor kind");
    }
    if (isa<CXXConstructorDecl>(GD.getDecl())) {
      switch (GD.getCtorType()) {
      case Ctor_Complete:
        return true;

      case Ctor_Base:
        return false;

      case Ctor_CopyingClosure:
      case Ctor_DefaultClosure:
        llvm_unreachable("closure ctors in Itanium ABI?");

      case Ctor_Comdat:
        llvm_unreachable("emitting ctor comdat as function?");
      }
      llvm_unreachable("bad ctor kind");
    }

    // No other kinds.
    return false;
  }

  bool isZeroInitializable(const MemberPointerType *MPT) override;

  llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;

  CGCallee
    EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
                                    const Expr *E,
                                    Address This,
                                    llvm::Value *&ThisPtrForCall,
                                    llvm::Value *MemFnPtr,
                                    const MemberPointerType *MPT) override;

  llvm::Value *
    EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
                                 Address Base,
                                 llvm::Value *MemPtr,
                                 const MemberPointerType *MPT) override;

  llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *Src) override;
  llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
                                              llvm::Constant *Src) override;

  llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;

  llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
  llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
                                        CharUnits offset) override;
  llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
  llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
                                     CharUnits ThisAdjustment);

  llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L, llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) override;

  llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *Addr,
                                          const MemberPointerType *MPT) override;

  void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
                               Address Ptr, QualType ElementType,
                               const CXXDestructorDecl *Dtor) override;

  void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
  void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;

  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

  llvm::CallInst *
  emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                      llvm::Value *Exn) override;

  void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
  llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
  CatchTypeInfo
  getAddrOfCXXCatchHandlerType(QualType Ty,
                               QualType CatchHandlerType) override {
    return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
  }

  bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
  void EmitBadTypeidCall(CodeGenFunction &CGF) override;
  llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
                          Address ThisPtr,
                          llvm::Type *StdTypeInfoPtrTy) override;

  bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                          QualType SrcRecordTy) override;

  llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
                                   QualType SrcRecordTy, QualType DestTy,
                                   QualType DestRecordTy,
                                   llvm::BasicBlock *CastEnd) override;

  llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy,
                                     QualType DestTy) override;

  bool EmitBadCastCall(CodeGenFunction &CGF) override;

  llvm::Value *
    GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
                              const CXXRecordDecl *ClassDecl,
                              const CXXRecordDecl *BaseClassDecl) override;

  void EmitCXXConstructors(const CXXConstructorDecl *D) override;

  AddedStructorArgs
  buildStructorSignature(GlobalDecl GD,
                         SmallVectorImpl<CanQualType> &ArgTys) override;

  bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
                              CXXDtorType DT) const override {
    // Itanium does not emit any destructor variant as an inline thunk.
    // Delegating may occur as an optimization, but all variants are either
    // emitted with external linkage or as linkonce if they are inline and used.
    return false;
  }

  void EmitCXXDestructors(const CXXDestructorDecl *D) override;

  void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
                                 FunctionArgList &Params) override;

  void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;

  AddedStructorArgs
  addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
                             CXXCtorType Type, bool ForVirtualBase,
                             bool Delegating, CallArgList &Args) override;

  void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
                          CXXDtorType Type, bool ForVirtualBase,
                          bool Delegating, Address This,
                          QualType ThisTy) override;

  void emitVTableDefinitions(CodeGenVTables &CGVT,
                             const CXXRecordDecl *RD) override;

  bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
                                           CodeGenFunction::VPtr Vptr) override;

  bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
    return true;
  }

  llvm::Constant *
  getVTableAddressPoint(BaseSubobject Base,
                        const CXXRecordDecl *VTableClass) override;

  llvm::Value *getVTableAddressPointInStructor(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;

  llvm::Value *getVTableAddressPointInStructorWithVTT(
      CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
      BaseSubobject Base, const CXXRecordDecl *NearestVBase);

  llvm::Constant *
  getVTableAddressPointForConstExpr(BaseSubobject Base,
                                    const CXXRecordDecl *VTableClass) override;

  llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
                                        CharUnits VPtrOffset) override;

  CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
                                     Address This, llvm::Type *Ty,
                                     SourceLocation Loc) override;

  llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
                                         const CXXDestructorDecl *Dtor,
                                         CXXDtorType DtorType, Address This,
                                         DeleteOrMemberCallExpr E) override;

  void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;

  bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
  bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;

  void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
                       bool ReturnAdjustment) override {
    // Allow inlining of thunks by emitting them with available_externally
    // linkage together with vtables when needed.
    if (ForVTable && !Thunk->hasLocalLinkage())
      Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
    CGM.setGVProperties(Thunk, GD);
  }

  bool exportThunk() override { return true; }

  llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const ThisAdjustment &TA) override;

  llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const ReturnAdjustment &RA) override;

  size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
                              FunctionArgList &Args) const override {
    assert(!Args.empty() && "expected the arglist to not be empty!");
    return Args.size() - 1;
  }

  StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
  StringRef GetDeletedVirtualCallName() override
    { return "__cxa_deleted_virtual"; }

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
                                   Address allocPtr,
                                   CharUnits cookieSize) override;

  void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
                       llvm::GlobalVariable *DeclPtr,
                       bool PerformInit) override;
  void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                          llvm::FunctionCallee dtor,
                          llvm::Constant *addr) override;

  llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                                llvm::Value *Val);
  void EmitThreadLocalInitFuncs(
      CodeGenModule &CGM,
      ArrayRef<const VarDecl *> CXXThreadLocals,
      ArrayRef<llvm::Function *> CXXThreadLocalInits,
      ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;

  /// Determine whether we will definitely emit this variable with a constant
  /// initializer, either because the language semantics demand it or because
  /// we know that the initializer is a constant.
  bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
    VD = VD->getMostRecentDecl();
    if (VD->hasAttr<ConstInitAttr>())
      return true;

    // All later checks examine the initializer specified on the variable. If
    // the variable is weak, such examination would not be correct.
    if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
      return false;

    const VarDecl *InitDecl = VD->getInitializingDeclaration();
    if (!InitDecl)
      return false;

    // If there's no initializer to run, this is constant initialization.
    if (!InitDecl->hasInit())
      return true;

    // If we have the only definition, we don't need a thread wrapper if we
    // will emit the value as a constant.
    if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
      return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();

    // Otherwise, we need a thread wrapper unless we know that every
    // translation unit will emit the value as a constant. We rely on
    // ICE-ness not varying between translation units, which isn't actually
    // guaranteed by the standard but is necessary for sanity.
    return InitDecl->isInitKnownICE() && InitDecl->isInitICE();
  }

  bool usesThreadWrapperFunction(const VarDecl *VD) const override {
    return !isEmittedWithConstantInitializer(VD) ||
           VD->needsDestruction(getContext());
  }
  LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
                                      QualType LValType) override;

  bool NeedsVTTParameter(GlobalDecl GD) override;

  /**************************** RTTI Uniqueness ******************************/

protected:
  /// Returns true if the ABI requires RTTI type_info objects to be unique
  /// across a program.
  virtual bool shouldRTTIBeUnique() const { return true; }

public:
  /// What sort of unique-RTTI behavior should we use?
  enum RTTIUniquenessKind {
    /// We are guaranteeing, or need to guarantee, that the RTTI string
    /// is unique.
    RUK_Unique,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// can demote to hidden visibility but must use string comparisons.
    RUK_NonUniqueHidden,

    /// We are not guaranteeing uniqueness for the RTTI string, so we
    /// have to use string comparisons, but we also have to emit it with
    /// non-hidden visibility.
    RUK_NonUniqueVisible
  };

  /// Return the required visibility status for the given type and linkage in
  /// the current ABI.
  RTTIUniquenessKind
  classifyRTTIUniqueness(QualType CanTy,
                         llvm::GlobalValue::LinkageTypes Linkage) const;
  friend class ItaniumRTTIBuilder;

  void emitCXXStructor(GlobalDecl GD) override;

  std::pair<llvm::Value *, const CXXRecordDecl *>
  LoadVTablePtr(CodeGenFunction &CGF, Address This,
                const CXXRecordDecl *RD) override;

private:
  bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      // Skip empty slot.
      if (!VtableComponent.isUsedFunctionPointerKind())
        continue;

      const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
      if (!Method->getCanonicalDecl()->isInlined())
        continue;

      StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
      auto *Entry = CGM.GetGlobalValue(Name);
      // This checks if virtual inline function has already been emitted.
      // Note that it is possible that this inline function would be emitted
      // after trying to emit vtable speculatively. Because of this we do
      // an extra pass after emitting all deferred vtables to find and emit
      // these vtables opportunistically.
      if (!Entry || Entry->isDeclaration())
        return true;
    }
    return false;
  }

  bool isVTableHidden(const CXXRecordDecl *RD) const {
    const auto &VtableLayout =
        CGM.getItaniumVTableContext().getVTableLayout(RD);

    for (const auto &VtableComponent : VtableLayout.vtable_components()) {
      if (VtableComponent.isRTTIKind()) {
        const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
        if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
          return true;
      } else if (VtableComponent.isUsedFunctionPointerKind()) {
        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
        if (Method->getVisibility() == Visibility::HiddenVisibility &&
            !Method->isDefined())
          return true;
      }
    }
    return false;
  }
};

class ARMCXXABI : public ItaniumCXXABI {
public:
  ARMCXXABI(CodeGen::CodeGenModule &CGM) :
    ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                  /*UseARMGuardVarABI=*/true) {}

  bool HasThisReturn(GlobalDecl GD) const override {
    return (isa<CXXConstructorDecl>(GD.getDecl()) || (
              isa<CXXDestructorDecl>(GD.getDecl()) &&
              GD.getDtorType() != Dtor_Deleting));
  }

  void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
                           QualType ResTy) override;

  CharUnits getArrayCookieSizeImpl(QualType elementType) override;
  Address InitializeArrayCookie(CodeGenFunction &CGF,
                                Address NewPtr,
                                llvm::Value *NumElements,
                                const CXXNewExpr *expr,
                                QualType ElementType) override;
  llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
                                   CharUnits cookieSize) override;
};

class iOS64CXXABI : public ARMCXXABI {
public:
  iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
    Use32BitVTableOffsetABI = true;
  }

  // ARM64 libraries are prepared for non-unique RTTI.
  bool shouldRTTIBeUnique() const override { return false; }
};

class FuchsiaCXXABI final : public ItaniumCXXABI {
public:
  explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM) {}

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
};

class WebAssemblyCXXABI final : public ItaniumCXXABI {
public:
  explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
      : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                      /*UseARMGuardVarABI=*/true) {}
  void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;

private:
  bool HasThisReturn(GlobalDecl GD) const override {
    return isa<CXXConstructorDecl>(GD.getDecl()) ||
           (isa<CXXDestructorDecl>(GD.getDecl()) &&
            GD.getDtorType() != Dtor_Deleting);
  }
  bool canCallMismatchedFunctionType() const override { return false; }
};
}

CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
  switch (CGM.getTarget().getCXXABI().getKind()) {
  // For IR-generation purposes, there's no significant difference
  // between the ARM and iOS ABIs.
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
    return new ARMCXXABI(CGM);

  case TargetCXXABI::iOS64:
    return new iOS64CXXABI(CGM);

  case TargetCXXABI::Fuchsia:
    return new FuchsiaCXXABI(CGM);

  // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
  // include the other 32-bit ARM oddities: constructor/destructor return values
  // and array cookies.
  case TargetCXXABI::GenericAArch64:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
                             /*UseARMGuardVarABI=*/true);

  case TargetCXXABI::GenericMIPS:
    return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);

  case TargetCXXABI::WebAssembly:
    return new WebAssemblyCXXABI(CGM);

  case TargetCXXABI::GenericItanium:
    if (CGM.getContext().getTargetInfo().getTriple().getArch()
        == llvm::Triple::le32) {
      // For PNaCl, use ARM-style method pointers so that PNaCl code
      // does not assume anything about the alignment of function
      // pointers.
      return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
    }
    return new ItaniumCXXABI(CGM);

  case TargetCXXABI::Microsoft:
    llvm_unreachable("Microsoft ABI is not Itanium-based");
  }
  llvm_unreachable("bad ABI kind");
}

llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
  if (MPT->isMemberDataPointer())
    return CGM.PtrDiffTy;
  return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
}

/// In the Itanium and ARM ABIs, method pointers have the form:
///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
///
/// In the Itanium ABI:
///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
///  - the this-adjustment is (memptr.adj)
///  - the virtual offset is (memptr.ptr - 1)
///
/// In the ARM ABI:
///  - method pointers are virtual if (memptr.adj & 1) is nonzero
///  - the this-adjustment is (memptr.adj >> 1)
///  - the virtual offset is (memptr.ptr)
/// ARM uses 'adj' for the virtual flag because Thumb functions
/// may be only single-byte aligned.
///
/// If the member is virtual, the adjusted 'this' pointer points
/// to a vtable pointer from which the virtual offset is applied.
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
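///
/// For illustration (a sketch, assuming a 64-bit target with no
/// this-adjustment): a pointer to a non-virtual member function &X::f is
/// encoded as { (ptrdiff_t)&X::f, 0 }; a pointer to a virtual member
/// function in vtable slot 2 (byte offset 16) is encoded as { 17, 0 }
/// under Itanium and as { 16, 1 } under the ARM variant, since ARM keeps
/// the virtual flag in the low bit of 'adj' (adj = 2*adjustment + 1).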
CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
    CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
    llvm::Value *&ThisPtrForCall,
    llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));

  llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);

  llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
  llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
  llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");

  // Extract memptr.adj, which is in the second field.
  llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");

  // Compute the true adjustment.
  llvm::Value *Adj = RawAdj;
  if (UseARMMethodPtrABI)
    Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");

  // Apply the adjustment and cast back to the original struct type
  // for consistency.
  llvm::Value *This = ThisAddr.getPointer();
  llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
  Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
  This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
  ThisPtrForCall = This;

  // Load the function pointer.
  llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");

  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual;
  if (UseARMMethodPtrABI)
    IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
  else
    IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
  IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);

  // In the virtual path, the adjustment left 'This' pointing to the
  // vtable of the correct base subobject.  The "function pointer" is an
  // offset within the vtable (+1 for the virtual flag on non-ARM).
  CGF.EmitBlock(FnVirtual);

  // Cast the adjusted this to a pointer to vtable pointer and load.
  llvm::Type *VTableTy = Builder.getInt8PtrTy();
  CharUnits VTablePtrAlign =
    CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
                                      CGF.getPointerAlign());
  llvm::Value *VTable =
    CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);

  // Apply the offset.
  // On ARM64, to reserve extra space in virtual member function pointers,
  // we only pay attention to the low 32 bits of the offset.
  llvm::Value *VTableOffset = FnAsInt;
  if (!UseARMMethodPtrABI)
    VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
  if (Use32BitVTableOffsetABI) {
    VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
    VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
  }

  // Check the address of the function pointer if CFI on member function
  // pointers is enabled.
  llvm::Constant *CheckSourceLocation;
  llvm::Constant *CheckTypeDesc;
  bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
                            CGM.HasHiddenLTOVisibility(RD);
  bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
                           CGM.HasHiddenLTOVisibility(RD);
  llvm::Value *VirtualFn = nullptr;

  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *TypeId = nullptr;
    llvm::Value *CheckResult = nullptr;

    if (ShouldEmitCFICheck || ShouldEmitVFEInfo) {
      // If doing CFI or VFE, we will need the metadata node to check against.
      llvm::Metadata *MD =
          CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
      TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
    }

    llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);

    if (ShouldEmitVFEInfo) {
      // If doing VFE, load from the vtable with a type.checked.load intrinsic
      // call. Note that we use the GEP to calculate the address to load from
      // and pass 0 as the offset to the intrinsic. This is because every
      // vtable slot of the correct type is marked with matching metadata, and
      // we know that the load must be from one of these slots.
      llvm::Value *CheckedLoad = Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
          {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
      CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
      VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
      VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
                                        "memptr.virtualfn");
    } else {
      // When not doing VFE, emit a normal load, as it allows more
      // optimisations than type.checked.load.
      if (ShouldEmitCFICheck) {
        CheckResult = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test),
            {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
      }
      VFPAddr =
          Builder.CreateBitCast(VFPAddr, FTy->getPointerTo()->getPointerTo());
      VirtualFn = Builder.CreateAlignedLoad(VFPAddr, CGF.getPointerAlign(),
                                            "memptr.virtualfn");
    }
    assert(VirtualFn && "Virtual function pointer not created!");
    assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || CheckResult) &&
           "Check result required but not created!");

    if (ShouldEmitCFICheck) {
      // If doing CFI, emit the check.
      CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
      CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
        CGF.EmitTrapCheck(CheckResult);
      } else {
        llvm::Value *AllVtables = llvm::MetadataAsValue::get(
            CGM.getLLVMContext(),
            llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
        llvm::Value *ValidVtable = Builder.CreateCall(
            CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
        CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
                      SanitizerHandler::CFICheckFail, StaticData,
                      {VTable, ValidVtable});
      }

      FnVirtual = Builder.GetInsertBlock();
    }
  } // End of sanitizer scope

  CGF.EmitBranch(FnEnd);

  // In the non-virtual path, the function pointer is actually a
  // function pointer.
  CGF.EmitBlock(FnNonVirtual);
  llvm::Value *NonVirtualFn =
    Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");

  // Check the function pointer if CFI on member function pointers is enabled.
  if (ShouldEmitCFICheck) {
    CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
    if (RD->hasDefinition()) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      llvm::Constant *StaticData[] = {
          llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
          CheckSourceLocation,
          CheckTypeDesc,
      };

      llvm::Value *Bit = Builder.getFalse();
      llvm::Value *CastedNonVirtualFn =
          Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
      for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
        llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
            getContext().getMemberPointerType(
                MPT->getPointeeType(),
                getContext().getRecordType(Base).getTypePtr()));
        llvm::Value *TypeId =
            llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);

        llvm::Value *TypeTest =
            Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                               {CastedNonVirtualFn, TypeId});
        Bit = Builder.CreateOr(Bit, TypeTest);
      }

      CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
                    SanitizerHandler::CFICheckFail, StaticData,
                    {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});

      FnNonVirtual = Builder.GetInsertBlock();
    }
  }

  // We're done.
  CGF.EmitBlock(FnEnd);
  llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
  CalleePtr->addIncoming(VirtualFn, FnVirtual);
  CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);

  CGCallee Callee(FPT, CalleePtr);
  return Callee;
}

/// Compute an l-value by applying the given pointer-to-member to a
/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char*.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr =
    Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                            ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}

/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
/// conversion.
///
/// Bitcast conversions are always a no-op under Itanium.
///
/// Obligatory offset/adjustment diagram:
///         <-- offset -->          <-- adjustment -->
///   |--------------------------|----------------------|--------------------|
///   ^Derived address point     ^Base address point    ^Member address point
///
/// So when converting a base member pointer to a derived member pointer,
/// we add the offset to the adjustment because the address point has
/// decreased;  and conversely, when converting a derived MP to a base MP
/// we subtract the offset from the adjustment because the address point
/// has increased.
///
/// The standard forbids (at compile time) conversion to and from
/// virtual bases, which is why we don't have to consider them here.
///
/// The standard forbids (at run time) casting a derived MP to a base
/// MP when the derived MP does not point to a member of the base.
/// This is why -1 is a reasonable choice for null data member
/// pointers.
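///
/// As a sketch of the data-member case: given
///   struct A1 { int a; };
///   struct A2 { int b; };
///   struct B : A1, A2 {};
/// where A2 starts at byte offset 4 inside B on a typical 32-bit-int target,
/// converting an 'int A2::*' holding offset 0 (&A2::b) to 'int B::*' yields
/// 0 + 4 = 4, and the reverse conversion subtracts 4 again; the null value
/// -1 is left untouched by the null check below.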
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
                                           llvm::Constant *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // If the adjustment is trivial, we don't need to do anything.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    // null maps to null.
    if (src->isAllOnesValue()) return src;

    if (isDerivedToBase)
      return llvm::ConstantExpr::getNSWSub(src, adj);
    else
      return llvm::ConstantExpr::getNSWAdd(src, adj);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
  llvm::Constant *dstAdj;
  if (isDerivedToBase)
    dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
  else
    dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);

  return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
}

llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
  // Itanium C++ ABI 2.3:
  //   A NULL pointer is represented as -1.
  if (MPT->isMemberDataPointer())
    return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);

  llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
  llvm::Constant *Values[2] = { Zero, Zero };
  return llvm::ConstantStruct::getAnon(Values);
}

llvm::Constant *
ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
                                     CharUnits offset) {
  // Itanium C++ ABI 2.3:
  //   A pointer to data member is an offset from the base address of
  //   the class object containing it, represented as a ptrdiff_t
  return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
}

llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  return BuildMemberPointer(MD, CharUnits::Zero());
}

llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
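
// For example (a sketch, assuming an 8-byte pointer target and a zero
// this-adjustment): a virtual member function occupying vtable slot 3 yields
// the constant { i64 25, i64 0 } under Itanium (3 * 8 + 1) and
// { i64 24, i64 1 } under the ARM variant, while a non-virtual member
// function f yields { ptrtoint(&f), 0 }.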

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.
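  //
  // As a concrete instance of the Itanium rule: two null member function
  // pointers compare equal even if their 'adj' fields differ (both have
  // ptr == 0), while two pointers to the same non-virtual function carrying
  // different this-adjustments compare unequal.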

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // If C++ prohibits us from making a copy, return by address.
  if (!RD->canPassInRegisters()) {
    auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
    FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset =
      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
      CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
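
// For illustration: when deleting through a secondary base pointer (say a B*
// pointing into a D object laid out as { A, B }), the offset-to-top entry at
// vtable index -2 holds a negative byte offset (e.g. -16 if the B subobject
// starts 16 bytes into D on a 64-bit target), so the GEP above recovers the
// address of the complete D object to pass to operator delete.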

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);

  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
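
// Conceptually, 'throw E(args);' therefore lowers to roughly:
//   void *exn = __cxa_allocate_exception(sizeof(E));
//   new (exn) E(args);
//   __cxa_throw(exn, &typeid(E), dtor);   // dtor is null if ~E is trivial
// with the final call emitted as a noreturn call, or as an invoke when it
// has to unwind through enclosing cleanups.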
1295
1296static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1297  // void *__dynamic_cast(const void *sub,
1298  //                      const abi::__class_type_info *src,
1299  //                      const abi::__class_type_info *dst,
1300  //                      std::ptrdiff_t src2dst_offset);
1301
1302  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1303  llvm::Type *PtrDiffTy =
1304    CGF.ConvertType(CGF.getContext().getPointerDiffType());
1305
1306  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1307
1308  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1309
1310  // Mark the function as nounwind readonly.
1311  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1312                                            llvm::Attribute::ReadOnly };
1313  llvm::AttributeList Attrs = llvm::AttributeList::get(
1314      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1315
1316  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1317}
1318
1319static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1320  // void __cxa_bad_cast();
1321  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1322  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1323}
1324
1325/// Compute the src2dst_offset hint as described in the
1326/// Itanium C++ ABI [2.9.7]
1327static CharUnits computeOffsetHint(ASTContext &Context,
1328                                   const CXXRecordDecl *Src,
1329                                   const CXXRecordDecl *Dst) {
1330  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1331                     /*DetectVirtual=*/false);
1332
1333  // If Dst is not derived from Src, skip the computation below and report that
1334  // Src is not a public base of Dst; otherwise record all inheritance paths.
1335  if (!Dst->isDerivedFrom(Src, Paths))
1336    return CharUnits::fromQuantity(-2ULL);
1337
1338  unsigned NumPublicPaths = 0;
1339  CharUnits Offset;
1340
1341  // Now walk all possible inheritance paths.
1342  for (const CXXBasePath &Path : Paths) {
1343    if (Path.Access != AS_public)  // Ignore non-public inheritance.
1344      continue;
1345
1346    ++NumPublicPaths;
1347
1348    for (const CXXBasePathElement &PathElement : Path) {
1349      // If the path contains a virtual base class we can't give any hint.
1350      // -1: no hint.
1351      if (PathElement.Base->isVirtual())
1352        return CharUnits::fromQuantity(-1ULL);
1353
1354      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1355        continue;
1356
1357      // Accumulate the base class offsets.
1358      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1359      Offset += L.getBaseClassOffset(
1360          PathElement.Base->getType()->getAsCXXRecordDecl());
1361    }
1362  }
1363
1364  // -2: Src is not a public base of Dst.
1365  if (NumPublicPaths == 0)
1366    return CharUnits::fromQuantity(-2ULL);
1367
1368  // -3: Src is a multiple public base type but never a virtual base type.
1369  if (NumPublicPaths > 1)
1370    return CharUnits::fromQuantity(-3ULL);
1371
1372  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1373  // Return the offset of Src from the origin of Dst.
1374  return Offset;
1375}
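// Hedged examples of the hint values produced above, for a hypothetical
// hierarchy (exact offsets depend on the record layout):
//   struct A { virtual ~A(); };
//   struct B : A {};          // hint = offset of A within B (commonly 0)
//   struct C : A, B {};       // two public paths to A            -> -3
//   struct D : virtual A {};  // path through a virtual base      -> -1
//   struct E {};              // A is not a public base of E      -> -2
// The values are only an optimization hint for __dynamic_cast, per
// Itanium C++ ABI 2.9.7.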
1376
1377static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1378  // void __cxa_bad_typeid();
1379  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1380
1381  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1382}
1383
1384bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1385                                              QualType SrcRecordTy) {
1386  return IsDeref;
1387}
1388
1389void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1390  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1391  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1392  Call->setDoesNotReturn();
1393  CGF.Builder.CreateUnreachable();
1394}
1395
1396llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1397                                       QualType SrcRecordTy,
1398                                       Address ThisPtr,
1399                                       llvm::Type *StdTypeInfoPtrTy) {
1400  auto *ClassDecl =
1401      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1402  llvm::Value *Value =
1403      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1404
1405  // Load the type info.
1406  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1407  return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
1408}
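// In the Itanium vtable layout, the std::type_info pointer occupies the slot
// immediately before the address point, so 'typeid(*p)' is roughly two loads
// (illustrative sketch only):
//   vtable = *(const std::type_info ***)p;   // load the vptr
//   info   = vtable[-1];                     // slot -1 holds the type_info*
// which is exactly the GEP by -1 and the aligned load emitted above.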
1409
1410bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1411                                                       QualType SrcRecordTy) {
1412  return SrcIsPtr;
1413}
1414
1415llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1416    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1417    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1418  llvm::Type *PtrDiffLTy =
1419      CGF.ConvertType(CGF.getContext().getPointerDiffType());
1420  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1421
1422  llvm::Value *SrcRTTI =
1423      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1424  llvm::Value *DestRTTI =
1425      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1426
1427  // Compute the offset hint.
1428  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1429  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1430  llvm::Value *OffsetHint = llvm::ConstantInt::get(
1431      PtrDiffLTy,
1432      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1433
1434  // Emit the call to __dynamic_cast.
1435  llvm::Value *Value = ThisAddr.getPointer();
1436  Value = CGF.EmitCastToVoidPtr(Value);
1437
1438  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1439  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1440  Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1441
1442  /// C++ [expr.dynamic.cast]p9:
1443  ///   A failed cast to reference type throws std::bad_cast
1444  if (DestTy->isReferenceType()) {
1445    llvm::BasicBlock *BadCastBlock =
1446        CGF.createBasicBlock("dynamic_cast.bad_cast");
1447
1448    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1449    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1450
1451    CGF.EmitBlock(BadCastBlock);
1452    EmitBadCastCall(CGF);
1453  }
1454
1455  return Value;
1456}
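// Hedged sketch of the result of this lowering for a pointer cast
// ('Base' and 'Derived' are hypothetical names):
//   Derived *d = dynamic_cast<Derived *>(b);
// becomes approximately
//   void *r = __dynamic_cast(b, &typeid(Base), &typeid(Derived), hint);
//   d = (Derived *)r;                         // null on failure
// For a reference cast, a null result instead branches to the bad_cast
// block emitted above, which calls __cxa_bad_cast.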
1457
1458llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1459                                                  Address ThisAddr,
1460                                                  QualType SrcRecordTy,
1461                                                  QualType DestTy) {
1462  llvm::Type *PtrDiffLTy =
1463      CGF.ConvertType(CGF.getContext().getPointerDiffType());
1464  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1465
1466  auto *ClassDecl =
1467      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1468  // Get the vtable pointer.
1469  llvm::Value *VTable = CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(),
1470      ClassDecl);
1471
1472  // Get the offset-to-top from the vtable.
1473  llvm::Value *OffsetToTop =
1474      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1475  OffsetToTop =
1476    CGF.Builder.CreateAlignedLoad(OffsetToTop, CGF.getPointerAlign(),
1477                                  "offset.to.top");
1478
1479  // Finally, add the offset to the pointer.
1480  llvm::Value *Value = ThisAddr.getPointer();
1481  Value = CGF.EmitCastToVoidPtr(Value);
1482  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1483
1484  return CGF.Builder.CreateBitCast(Value, DestLTy);
1485}
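// dynamic_cast<void *> uses the offset-to-top field, which the Itanium
// vtable stores at slot -2 relative to the address point (slot -1 holds the
// type_info pointer).  The code above is roughly (sketch only):
//   offset = ((const ptrdiff_t **)obj)[0][-2];
//   result = (char *)obj + offset;            // most-derived object address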
1486
1487bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1488  llvm::FunctionCallee Fn = getBadCastFn(CGF);
1489  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1490  Call->setDoesNotReturn();
1491  CGF.Builder.CreateUnreachable();
1492  return true;
1493}
1494
1495llvm::Value *
1496ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1497                                         Address This,
1498                                         const CXXRecordDecl *ClassDecl,
1499                                         const CXXRecordDecl *BaseClassDecl) {
1500  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1501  CharUnits VBaseOffsetOffset =
1502      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1503                                                               BaseClassDecl);
1504
1505  llvm::Value *VBaseOffsetPtr =
1506    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
1507                                   "vbase.offset.ptr");
1508  VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1509                                             CGM.PtrDiffTy->getPointerTo());
1510
1511  llvm::Value *VBaseOffset =
1512    CGF.Builder.CreateAlignedLoad(VBaseOffsetPtr, CGF.getPointerAlign(),
1513                                  "vbase.offset");
1514
1515  return VBaseOffset;
1516}
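// The virtual-base offset is stored at a (typically negative) byte offset
// from the vtable address point.  Conceptually (a sketch; the caller performs
// the final addition to 'this'):
//   vbase_offset = *(ptrdiff_t *)((char *)vptr + VBaseOffsetOffset);
//   vbase_ptr    = (char *)this + vbase_offset;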
1517
1518void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1519  // Just make sure we're in sync with TargetCXXABI.
1520  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1521
1522  // The constructor used for constructing this as a base class;
1523  // ignores virtual bases.
1524  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1525
1526  // The constructor used for constructing this as a complete class;
1527  // constructs the virtual bases, then calls the base constructor.
1528  if (!D->getParent()->isAbstract()) {
1529    // We don't need to emit the complete ctor if the class is abstract.
1530    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1531  }
1532}
1533
1534CGCXXABI::AddedStructorArgs
1535ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1536                                      SmallVectorImpl<CanQualType> &ArgTys) {
1537  ASTContext &Context = getContext();
1538
1539  // All parameters are already in place except VTT, which goes after 'this'.
1540  // These are Clang types, so we don't need to worry about sret yet.
1541
1542  // Check if we need to add a VTT parameter (which has type void **).
1543  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1544                                             : GD.getDtorType() == Dtor_Base) &&
1545      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1546    ArgTys.insert(ArgTys.begin() + 1,
1547                  Context.getPointerType(Context.VoidPtrTy));
1548    return AddedStructorArgs::prefix(1);
1549  }
1550  return AddedStructorArgs{};
1551}
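// Hedged example of the effect above ('B' and 'D' are hypothetical types):
//   struct B { };  struct D : virtual B { D(int); };
// The base-object constructor (the C2 variant) receives an extra VTT
// parameter after 'this', roughly:
//   D::D(D *this, void **vtt, int arg);   // base-object variant
// while the complete-object (C1) variant keeps the declared signature,
// since it knows the full object layout and needs no VTT.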
1552
1553void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1554  // The destructor used for destructing this as a base class; ignores
1555  // virtual bases.
1556  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1557
1558  // The destructor used for destructing this as a most-derived class;
1559  // calls the base destructor and then destructs any virtual bases.
1560  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1561
1562  // The destructor in a virtual table is always a 'deleting'
1563  // destructor, which calls the complete destructor and then uses the
1564  // appropriate operator delete.
1565  if (D->isVirtual())
1566    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1567}
1568
1569void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1570                                              QualType &ResTy,
1571                                              FunctionArgList &Params) {
1572  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1573  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1574
1575  // Check if we need a VTT parameter as well.
1576  if (NeedsVTTParameter(CGF.CurGD)) {
1577    ASTContext &Context = getContext();
1578
1579    // FIXME: avoid the fake decl
1580    QualType T = Context.getPointerType(Context.VoidPtrTy);
1581    auto *VTTDecl = ImplicitParamDecl::Create(
1582        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1583        T, ImplicitParamDecl::CXXVTT);
1584    Params.insert(Params.begin() + 1, VTTDecl);
1585    getStructorImplicitParamDecl(CGF) = VTTDecl;
1586  }
1587}
1588
1589void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1590  // Naked functions have no prolog.
1591  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1592    return;
1593
1594  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1595  /// adjustments are required, because they are all handled by thunks.
1596  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1597
1598  /// Initialize the 'vtt' slot if needed.
1599  if (getStructorImplicitParamDecl(CGF)) {
1600    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1601        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1602  }
1603
1604  /// If this is a function that the ABI specifies returns 'this', initialize
1605  /// the return slot to 'this' at the start of the function.
1606  ///
1607  /// Unlike the setting of return types, this is done within the ABI
1608  /// implementation instead of by clients of CGCXXABI because:
1609  /// 1) getThisValue is currently protected
1610  /// 2) in theory, an ABI could implement 'this' returns some other way;
1611  ///    HasThisReturn only specifies a contract, not the implementation
1612  if (HasThisReturn(CGF.CurGD))
1613    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1614}
1615
1616CGCXXABI::AddedStructorArgs ItaniumCXXABI::addImplicitConstructorArgs(
1617    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1618    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
1619  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1620    return AddedStructorArgs{};
1621
1622  // Insert the implicit 'vtt' argument as the second argument.
1623  llvm::Value *VTT =
1624      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1625  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1626  Args.insert(Args.begin() + 1, CallArg(RValue::get(VTT), VTTTy));
1627  return AddedStructorArgs::prefix(1);  // Added one arg.
1628}
1629
1630void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1631                                       const CXXDestructorDecl *DD,
1632                                       CXXDtorType Type, bool ForVirtualBase,
1633                                       bool Delegating, Address This,
1634                                       QualType ThisTy) {
1635  GlobalDecl GD(DD, Type);
1636  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1637  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1638
1639  CGCallee Callee;
1640  if (getContext().getLangOpts().AppleKext &&
1641      Type != Dtor_Base && DD->isVirtual())
1642    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1643  else
1644    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1645
1646  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1647                            nullptr);
1648}
1649
1650void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1651                                          const CXXRecordDecl *RD) {
1652  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1653  if (VTable->hasInitializer())
1654    return;
1655
1656  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1657  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1658  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1659  llvm::Constant *RTTI =
1660      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1661
1662  // Create and set the initializer.
1663  ConstantInitBuilder Builder(CGM);
1664  auto Components = Builder.beginStruct();
1665  CGVT.createVTableInitializer(Components, VTLayout, RTTI);
1666  Components.finishAndSetAsInitializer(VTable);
1667
1668  // Set the correct linkage.
1669  VTable->setLinkage(Linkage);
1670
1671  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1672    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1673
1674  // Set the right visibility.
1675  CGM.setGVProperties(VTable, RD);
1676
1677  // If this is the magic class __cxxabiv1::__fundamental_type_info,
1678  // we will emit the typeinfo for the fundamental types. This is the
1679  // same behaviour as GCC.
1680  const DeclContext *DC = RD->getDeclContext();
1681  if (RD->getIdentifier() &&
1682      RD->getIdentifier()->isStr("__fundamental_type_info") &&
1683      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1684      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1685      DC->getParent()->isTranslationUnit())
1686    EmitFundamentalRTTIDescriptors(RD);
1687
1688  if (!VTable->isDeclarationForLinker())
1689    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1690}
1691
1692bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1693    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1694  if (Vptr.NearestVBase == nullptr)
1695    return false;
1696  return NeedsVTTParameter(CGF.CurGD);
1697}
1698
1699llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1700    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1701    const CXXRecordDecl *NearestVBase) {
1702
1703  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1704      NeedsVTTParameter(CGF.CurGD)) {
1705    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1706                                                  NearestVBase);
1707  }
1708  return getVTableAddressPoint(Base, VTableClass);
1709}
1710
1711llvm::Constant *
1712ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1713                                     const CXXRecordDecl *VTableClass) {
1714  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1715
1716  // Find the appropriate vtable within the vtable group, and the address point
1717  // within that vtable.
1718  VTableLayout::AddressPointLocation AddressPoint =
1719      CGM.getItaniumVTableContext()
1720          .getVTableLayout(VTableClass)
1721          .getAddressPoint(Base);
1722  llvm::Value *Indices[] = {
1723    llvm::ConstantInt::get(CGM.Int32Ty, 0),
1724    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1725    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1726  };
1727
1728  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1729                                              Indices, /*InBounds=*/true,
1730                                              /*InRangeIndex=*/1);
1731}
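// The constant GEP above selects { 0, vtable-in-group, address-point-index }.
// For a simple dynamic class 'X' this is typically something like
//   getelementptr inbounds (... @_ZTV1X, i32 0, i32 0, i32 2)
// where index 2 skips the offset-to-top and RTTI slots so the result points
// at the first virtual function entry (illustrative; the actual indices come
// from the VTableLayout address-point table).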
1732
1733llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1734    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1735    const CXXRecordDecl *NearestVBase) {
1736  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1737         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1738
1739  // Get the secondary vpointer index.
1740  uint64_t VirtualPointerIndex =
1741      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1742
1743  /// Load the VTT.
1744  llvm::Value *VTT = CGF.LoadCXXVTT();
1745  if (VirtualPointerIndex)
1746    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1747
1748  // And load the address point from the VTT.
1749  return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
1750}
1751
1752llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1753    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1754  return getVTableAddressPoint(Base, VTableClass);
1755}
1756
1757llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1758                                                     CharUnits VPtrOffset) {
1759  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1760
1761  llvm::GlobalVariable *&VTable = VTables[RD];
1762  if (VTable)
1763    return VTable;
1764
1765  // Queue up this vtable for possible deferred emission.
1766  CGM.addDeferredVTable(RD);
1767
1768  SmallString<256> Name;
1769  llvm::raw_svector_ostream Out(Name);
1770  getMangleContext().mangleCXXVTable(RD, Out);
1771
1772  const VTableLayout &VTLayout =
1773      CGM.getItaniumVTableContext().getVTableLayout(RD);
1774  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1775
1776  // Use pointer alignment for the vtable. Otherwise we would align them based
1777  // on the size of the initializer which doesn't make sense as only single
1778  // values are read.
1779  unsigned PAlign = CGM.getTarget().getPointerAlign(0);
1780
1781  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1782      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1783      getContext().toCharUnitsFromBits(PAlign).getQuantity());
1784  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1785
1786  CGM.setGVProperties(VTable, RD);
1787
1788  return VTable;
1789}
1790
1791CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1792                                                  GlobalDecl GD,
1793                                                  Address This,
1794                                                  llvm::Type *Ty,
1795                                                  SourceLocation Loc) {
1796  Ty = Ty->getPointerTo()->getPointerTo();
1797  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1798  llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
1799
1800  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1801  llvm::Value *VFunc;
1802  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1803    VFunc = CGF.EmitVTableTypeCheckedLoad(
1804        MethodDecl->getParent(), VTable,
1805        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1806  } else {
1807    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1808
1809    llvm::Value *VFuncPtr =
1810        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
1811    auto *VFuncLoad =
1812        CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
1813
1814    // Add !invariant.load metadata to the virtual function load to indicate
1815    // that the function pointer did not change inside the vtable.
1816    // It is safe to add this without -fstrict-vtable-pointers, but it would
1817    // not help devirtualization, because it only matters when two loads of the
1818    // same virtual function come from the same vtable load, which does not
1819    // happen unless devirtualization is enabled with -fstrict-vtable-pointers.
1820    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1821        CGM.getCodeGenOpts().StrictVTablePointers)
1822      VFuncLoad->setMetadata(
1823          llvm::LLVMContext::MD_invariant_load,
1824          llvm::MDNode::get(CGM.getLLVMContext(),
1825                            llvm::ArrayRef<llvm::Metadata *>()));
1826    VFunc = VFuncLoad;
1827  }
1828
1829  CGCallee Callee(GD, VFunc);
1830  return Callee;
1831}
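// Net effect of this path for an ordinary virtual call (hedged sketch):
//   vtable = *(FnPtr **)this;          // GetVTablePtr
//   fn     = vtable[VTableIndex];      // GEP + (possibly !invariant.load) load
//   fn(this, args...);
// When ShouldEmitVTableTypeCheckedLoad is true, the GEP+load pair is instead
// replaced by the checked-load sequence from EmitVTableTypeCheckedLoad.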
1832
1833llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1834    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1835    Address This, DeleteOrMemberCallExpr E) {
1836  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1837  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1838  assert((CE != nullptr) ^ (D != nullptr));
1839  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1840  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1841
1842  GlobalDecl GD(Dtor, DtorType);
1843  const CGFunctionInfo *FInfo =
1844      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1845  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1846  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
1847
1848  QualType ThisTy;
1849  if (CE) {
1850    ThisTy = CE->getObjectType();
1851  } else {
1852    ThisTy = D->getDestroyedType();
1853  }
1854
1855  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
1856                            QualType(), nullptr);
1857  return nullptr;
1858}
1859
1860void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
1861  CodeGenVTables &VTables = CGM.getVTables();
1862  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
1863  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
1864}
1865
1866bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
1867    const CXXRecordDecl *RD) const {
1868  // We don't emit available_externally vtables if we are in -fapple-kext mode
1869  // because kext mode does not permit devirtualization.
1870  if (CGM.getLangOpts().AppleKext)
1871    return false;
1872
1873  // If the vtable is hidden then it is not safe to emit an available_externally
1874  // copy of the vtable.
1875  if (isVTableHidden(RD))
1876    return false;
1877
1878  if (CGM.getCodeGenOpts().ForceEmitVTables)
1879    return true;
1880
1881  // If there are no inline virtual functions that have not yet been emitted,
1882  // then we are safe to emit an available_externally copy of the vtable.
1883  // FIXME: we could still emit a copy of the vtable if we can emit
1884  // definitions of all the inline functions.
1885  if (hasAnyUnusedVirtualInlineFunction(RD))
1886    return false;
1887
1888  // For a class with virtual bases, we must also be able to speculatively
1889  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
1890  // the vtable" and "can emit the VTT". For a base subobject, this means we
1891  // need to be able to emit non-virtual base vtables.
1892  if (RD->getNumVBases()) {
1893    for (const auto &B : RD->bases()) {
1894      auto *BRD = B.getType()->getAsCXXRecordDecl();
1895      assert(BRD && "no class for base specifier");
1896      if (B.isVirtual() || !BRD->isDynamicClass())
1897        continue;
1898      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
1899        return false;
1900    }
1901  }
1902
1903  return true;
1904}
1905
1906bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
1907  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
1908    return false;
1909
1910  // For a complete-object vtable (or more specifically, for the VTT), we need
1911  // to be able to speculatively emit the vtables of all dynamic virtual bases.
1912  for (const auto &B : RD->vbases()) {
1913    auto *BRD = B.getType()->getAsCXXRecordDecl();
1914    assert(BRD && "no class for base specifier");
1915    if (!BRD->isDynamicClass())
1916      continue;
1917    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
1918      return false;
1919  }
1920
1921  return true;
1922}
1923static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
1924                                          Address InitialPtr,
1925                                          int64_t NonVirtualAdjustment,
1926                                          int64_t VirtualAdjustment,
1927                                          bool IsReturnAdjustment) {
1928  if (!NonVirtualAdjustment && !VirtualAdjustment)
1929    return InitialPtr.getPointer();
1930
1931  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
1932
1933  // In a base-to-derived cast, the non-virtual adjustment is applied first.
1934  if (NonVirtualAdjustment && !IsReturnAdjustment) {
1935    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
1936                              CharUnits::fromQuantity(NonVirtualAdjustment));
1937  }
1938
1939  // Perform the virtual adjustment if we have one.
1940  llvm::Value *ResultPtr;
1941  if (VirtualAdjustment) {
1942    llvm::Type *PtrDiffTy =
1943        CGF.ConvertType(CGF.getContext().getPointerDiffType());
1944
1945    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
1946    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
1947
1948    llvm::Value *OffsetPtr =
1949        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
1950
1951    OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
1952
1953    // Load the adjustment offset from the vtable.
1954    llvm::Value *Offset =
1955      CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
1956
1957    // Adjust our pointer.
1958    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
1959  } else {
1960    ResultPtr = V.getPointer();
1961  }
1962
1963  // In a derived-to-base conversion, the non-virtual adjustment is
1964  // applied second.
1965  if (NonVirtualAdjustment && IsReturnAdjustment) {
1966    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
1967                                                       NonVirtualAdjustment);
1968  }
1969
1970  // Cast back to the original type.
1971  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
1972}
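// Summary of the two adjustment orders handled above (per the Itanium ABI):
//   'this' adjustment (thunk entry): ptr += NonVirtual;
//                                    ptr += *(vptr of ptr + Virtual);
//   return adjustment (thunk exit):  ptr += *(vptr of ptr + Virtual);
//                                    ptr += NonVirtual;
// i.e. the non-virtual delta is applied first when adjusting 'this' and last
// when adjusting a returned pointer (a conceptual sketch of the code above).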
1973
1974llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
1975                                                  Address This,
1976                                                  const ThisAdjustment &TA) {
1977  return performTypeAdjustment(CGF, This, TA.NonVirtual,
1978                               TA.Virtual.Itanium.VCallOffsetOffset,
1979                               /*IsReturnAdjustment=*/false);
1980}
1981
1982llvm::Value *
1983ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
1984                                       const ReturnAdjustment &RA) {
1985  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
1986                               RA.Virtual.Itanium.VBaseOffsetOffset,
1987                               /*IsReturnAdjustment=*/true);
1988}
1989
1990void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
1991                                    RValue RV, QualType ResultType) {
1992  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
1993    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
1994
1995  // Destructor thunks in the ARM ABI have indeterminate results.
1996  llvm::Type *T = CGF.ReturnValue.getElementType();
1997  RValue Undef = RValue::get(llvm::UndefValue::get(T));
1998  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
1999}
2000
2001/************************** Array allocation cookies **************************/
2002
2003CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2004  // The array cookie is a size_t; pad that up to the element alignment.
2005  // The cookie is actually right-justified in that space.
2006  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2007                  CGM.getContext().getTypeAlignInChars(elementType));
2008}
2009
2010Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2011                                             Address NewPtr,
2012                                             llvm::Value *NumElements,
2013                                             const CXXNewExpr *expr,
2014                                             QualType ElementType) {
2015  assert(requiresArrayCookie(expr));
2016
2017  unsigned AS = NewPtr.getAddressSpace();
2018
2019  ASTContext &Ctx = getContext();
2020  CharUnits SizeSize = CGF.getSizeSize();
2021
2022  // The size of the cookie.
2023  CharUnits CookieSize =
2024    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
2025  assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2026
2027  // Compute an offset to the cookie.
2028  Address CookiePtr = NewPtr;
2029  CharUnits CookieOffset = CookieSize - SizeSize;
2030  if (!CookieOffset.isZero())
2031    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2032
2033  // Write the number of elements into the appropriate slot.
2034  Address NumElementsPtr =
2035      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2036  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2037
2038  // Handle the array cookie specially in ASan.
2039  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2040      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2041       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2042    // The store to the CookiePtr does not need to be instrumented.
2043    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2044    llvm::FunctionType *FTy =
2045        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2046    llvm::FunctionCallee F =
2047        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2048    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2049  }
2050
2051  // Finally, compute a pointer to the actual data buffer by skipping
2052  // over the cookie completely.
2053  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2054}
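// Resulting allocation layout for 'new T[n]' when a cookie is required
// (illustrative; the padding depends on alignof(T) vs. sizeof(size_t)):
//   | padding (possibly 0) | size_t n | T[0] T[1] ... T[n-1] |
//   ^ operator new result             ^ pointer returned here
// The element count is right-justified in the cookie so that the array
// itself remains suitably aligned.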
2055
2056llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2057                                                Address allocPtr,
2058                                                CharUnits cookieSize) {
2059  // The number of elements is right-justified in the cookie.
2060  Address numElementsPtr = allocPtr;
2061  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2062  if (!numElementsOffset.isZero())
2063    numElementsPtr =
2064      CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2065
2066  unsigned AS = allocPtr.getAddressSpace();
2067  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2068  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2069    return CGF.Builder.CreateLoad(numElementsPtr);
2070  // In asan mode emit a function call instead of a regular load and let the
2071  // run-time deal with it: if the shadow is properly poisoned return the
2072  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2073  // We can't simply ignore this load using nosanitize metadata because
2074  // the metadata may be lost.
2075  llvm::FunctionType *FTy =
2076      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2077  llvm::FunctionCallee F =
2078      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2079  return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2080}
2081
2082CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2083  // ARM says that the cookie is always:
2084  //   struct array_cookie {
2085  //     std::size_t element_size; // element_size != 0
2086  //     std::size_t element_count;
2087  //   };
2088  // But the base ABI doesn't give anything an alignment greater than
2089  // 8, so we can dismiss this as typical ABI-author blindness to
2090  // actual language complexity and round up to the element alignment.
2091  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2092                  CGM.getContext().getTypeAlignInChars(elementType));
2093}
2094
2095Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2096                                         Address newPtr,
2097                                         llvm::Value *numElements,
2098                                         const CXXNewExpr *expr,
2099                                         QualType elementType) {
2100  assert(requiresArrayCookie(expr));
2101
2102  // The cookie is always at the start of the buffer.
2103  Address cookie = newPtr;
2104
2105  // The first element is the element size.
2106  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2107  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2108                 getContext().getTypeSizeInChars(elementType).getQuantity());
2109  CGF.Builder.CreateStore(elementSize, cookie);
2110
2111  // The second element is the element count.
2112  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2113  CGF.Builder.CreateStore(numElements, cookie);
2114
2115  // Finally, compute a pointer to the actual data buffer by skipping
2116  // over the cookie completely.
2117  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2118  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2119}
2120
2121llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2122                                            Address allocPtr,
2123                                            CharUnits cookieSize) {
2124  // The number of elements is at offset sizeof(size_t) relative to
2125  // the allocated pointer.
2126  Address numElementsPtr
2127    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2128
2129  numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2130  return CGF.Builder.CreateLoad(numElementsPtr);
2131}
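// So in the ARM variant the cookie is always two size_t fields at the start
// of the allocation, roughly { element_size, element_count }; only the count
// is read back here, at offset sizeof(size_t).  The element size is stored
// to satisfy the ARM cookie layout but is not consumed by this reader.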
2132
2133/*********************** Static local initialization **************************/
2134
2135static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2136                                              llvm::PointerType *GuardPtrTy) {
2137  // int __cxa_guard_acquire(__guard *guard_object);
2138  llvm::FunctionType *FTy =
2139    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2140                            GuardPtrTy, /*isVarArg=*/false);
2141  return CGM.CreateRuntimeFunction(
2142      FTy, "__cxa_guard_acquire",
2143      llvm::AttributeList::get(CGM.getLLVMContext(),
2144                               llvm::AttributeList::FunctionIndex,
2145                               llvm::Attribute::NoUnwind));
2146}
2147
2148static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2149                                              llvm::PointerType *GuardPtrTy) {
2150  // void __cxa_guard_release(__guard *guard_object);
2151  llvm::FunctionType *FTy =
2152    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2153  return CGM.CreateRuntimeFunction(
2154      FTy, "__cxa_guard_release",
2155      llvm::AttributeList::get(CGM.getLLVMContext(),
2156                               llvm::AttributeList::FunctionIndex,
2157                               llvm::Attribute::NoUnwind));
2158}
2159
2160static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2161                                            llvm::PointerType *GuardPtrTy) {
2162  // void __cxa_guard_abort(__guard *guard_object);
2163  llvm::FunctionType *FTy =
2164    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2165  return CGM.CreateRuntimeFunction(
2166      FTy, "__cxa_guard_abort",
2167      llvm::AttributeList::get(CGM.getLLVMContext(),
2168                               llvm::AttributeList::FunctionIndex,
2169                               llvm::Attribute::NoUnwind));
2170}
2171
2172namespace {
2173  struct CallGuardAbort final : EHScopeStack::Cleanup {
2174    llvm::GlobalVariable *Guard;
2175    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2176
2177    void Emit(CodeGenFunction &CGF, Flags flags) override {
2178      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2179                                  Guard);
2180    }
2181  };
2182}
2183
2184/// The ARM code here follows the Itanium code closely enough that we
2185/// just special-case it at particular places.
2186void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2187                                    const VarDecl &D,
2188                                    llvm::GlobalVariable *var,
2189                                    bool shouldPerformInit) {
2190  CGBuilderTy &Builder = CGF.Builder;
2191
2192  // Inline variables that weren't instantiated from variable templates have
2193  // partially-ordered initialization within their translation unit.
2194  bool NonTemplateInline =
2195      D.isInline() &&
2196      !isTemplateInstantiation(D.getTemplateSpecializationKind());
2197
2198  // We only need to use thread-safe statics for local non-TLS variables and
2199  // inline variables; other global initialization is always single-threaded
2200  // or (through lazy dynamic loading in multiple threads) unsequenced.
2201  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2202                    (D.isLocalVarDecl() || NonTemplateInline) &&
2203                    !D.getTLSKind();
2204
2205  // If we have a global variable with internal linkage and thread-safe statics
2206  // are disabled, we can just let the guard variable be of type i8.
2207  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2208
2209  llvm::IntegerType *guardTy;
2210  CharUnits guardAlignment;
2211  if (useInt8GuardVariable) {
2212    guardTy = CGF.Int8Ty;
2213    guardAlignment = CharUnits::One();
2214  } else {
2215    // Guard variables are 64 bits in the generic ABI and the width of size_t
2216    // on ARM (i.e. 32 bits on AArch32, 64 bits on AArch64).
2217    if (UseARMGuardVarABI) {
2218      guardTy = CGF.SizeTy;
2219      guardAlignment = CGF.getSizeAlign();
2220    } else {
2221      guardTy = CGF.Int64Ty;
2222      guardAlignment = CharUnits::fromQuantity(
2223                             CGM.getDataLayout().getABITypeAlignment(guardTy));
2224    }
2225  }
2226  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
2227
2228  // Create the guard variable if we don't already have it (as we
2229  // might if we're double-emitting this function body).
2230  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2231  if (!guard) {
2232    // Mangle the name for the guard.
2233    SmallString<256> guardName;
2234    {
2235      llvm::raw_svector_ostream out(guardName);
2236      getMangleContext().mangleStaticGuardVariable(&D, out);
2237    }
2238
2239    // Create the guard variable with a zero-initializer.
2240    // Just absorb linkage and visibility from the guarded variable.
2241    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2242                                     false, var->getLinkage(),
2243                                     llvm::ConstantInt::get(guardTy, 0),
2244                                     guardName.str());
2245    guard->setDSOLocal(var->isDSOLocal());
2246    guard->setVisibility(var->getVisibility());
2247    // If the variable is thread-local, so is its guard variable.
2248    guard->setThreadLocalMode(var->getThreadLocalMode());
2249    guard->setAlignment(guardAlignment.getAsAlign());
2250
2251    // The ABI says: "It is suggested that it be emitted in the same COMDAT
2252    // group as the associated data object." In practice, this doesn't work for
2253    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2254    llvm::Comdat *C = var->getComdat();
2255    if (!D.isLocalVarDecl() && C &&
2256        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2257         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2258      guard->setComdat(C);
2259      // An inline variable's guard function is run from the per-TU
2260      // initialization function, not via a dedicated global ctor function, so
2261      // we can't put it in a comdat.
2262      if (!NonTemplateInline)
2263        CGF.CurFn->setComdat(C);
2264    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2265      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2266    }
2267
2268    CGM.setStaticLocalDeclGuardAddress(&D, guard);
2269  }
2270
2271  Address guardAddr = Address(guard, guardAlignment);
2272
2273  // Test whether the variable has completed initialization.
2274  //
2275  // Itanium C++ ABI 3.3.2:
2276  //   The following is pseudo-code showing how these functions can be used:
2277  //     if (obj_guard.first_byte == 0) {
2278  //       if ( __cxa_guard_acquire (&obj_guard) ) {
2279  //         try {
2280  //           ... initialize the object ...;
2281  //         } catch (...) {
2282  //            __cxa_guard_abort (&obj_guard);
2283  //            throw;
2284  //         }
2285  //         ... queue object destructor with __cxa_atexit() ...;
2286  //         __cxa_guard_release (&obj_guard);
2287  //       }
2288  //     }
2289
2290  // Load the first byte of the guard variable.
2291  llvm::LoadInst *LI =
2292      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2293
2294  // Itanium ABI:
2295  //   An implementation supporting thread-safety on multiprocessor
2296  //   systems must also guarantee that references to the initialized
2297  //   object do not occur before the load of the initialization flag.
2298  //
2299  // In LLVM, we do this by marking the load Acquire.
2300  if (threadsafe)
2301    LI->setAtomic(llvm::AtomicOrdering::Acquire);
2302
2303  // For ARM, we should only check the first bit, rather than the entire byte:
2304  //
2305  // ARM C++ ABI 3.2.3.1:
2306  //   To support the potential use of initialization guard variables
2307  //   as semaphores that are the target of ARM SWP and LDREX/STREX
2308  //   synchronizing instructions we define a static initialization
2309  //   guard variable to be a 4-byte aligned, 4-byte word with the
2310  //   following inline access protocol.
2311  //     #define INITIALIZED 1
2312  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
2313  //       if (__cxa_guard_acquire(&obj_guard))
2314  //         ...
2315  //     }
2316  //
2317  // and similarly for ARM64:
2318  //
2319  // ARM64 C++ ABI 3.2.2:
2320  //   This ABI instead only specifies the value bit 0 of the static guard
2321  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
2322  //   variable is not initialized and 1 when it is.
2323  llvm::Value *V =
2324      (UseARMGuardVarABI && !useInt8GuardVariable)
2325          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2326          : LI;
2327  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2328
2329  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2330  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2331
2332  // Check if the first byte of the guard variable is zero.
2333  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2334                               CodeGenFunction::GuardKind::VariableGuard, &D);
2335
2336  CGF.EmitBlock(InitCheckBlock);
2337
2338  // Variables used when coping with thread-safe statics and exceptions.
2339  if (threadsafe) {
2340    // Call __cxa_guard_acquire.
2341    llvm::Value *V
2342      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2343
2344    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2345
2346    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2347                         InitBlock, EndBlock);
2348
2349    // Call __cxa_guard_abort along the exceptional edge.
2350    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2351
2352    CGF.EmitBlock(InitBlock);
2353  }
2354
2355  // Emit the initializer and add a global destructor if appropriate.
2356  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2357
2358  if (threadsafe) {
2359    // Pop the guard-abort cleanup if we pushed one.
2360    CGF.PopCleanupBlock();
2361
2362    // Call __cxa_guard_release.  This cannot throw.
2363    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2364                                guardAddr.getPointer());
2365  } else {
2366    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
2367  }
2368
2369  CGF.EmitBlock(EndBlock);
2370}
2371
2372/// Register a global destructor using __cxa_atexit.
2373static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
2374                                        llvm::FunctionCallee dtor,
2375                                        llvm::Constant *addr, bool TLS) {
2376  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
2377         "__cxa_atexit is disabled");
2378  const char *Name = "__cxa_atexit";
2379  if (TLS) {
2380    const llvm::Triple &T = CGF.getTarget().getTriple();
2381    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
2382  }
2383
2384  // We're assuming that the destructor function is something we can
2385  // reasonably call with the default CC.  Go ahead and cast it to the
2386  // right prototype.
2387  llvm::Type *dtorTy =
2388    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
2389
2390  // Preserve address space of addr.
2391  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
2392  auto AddrInt8PtrTy =
2393      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
2394
2395  // Create a variable that binds the atexit to this shared object.
2396  llvm::Constant *handle =
2397      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
2398  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
2399  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
2400
2401  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
2402  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
2403  llvm::FunctionType *atexitTy =
2404    llvm::FunctionType::get(CGF.IntTy, paramTys, false);
2405
2406  // Fetch the actual function.
2407  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
2408  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
2409    fn->setDoesNotThrow();
2410
2411  if (!addr)
2412    // addr is null when we are trying to register a dtor annotated with
2413    // __attribute__((destructor)) in a constructor function. Using null here is
2414    // okay because this argument is just passed back to the destructor
2415    // function.
2416    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
2417
2418  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
2419                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
2420                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
2421                         handle};
2422  CGF.EmitNounwindRuntimeCall(atexit, args);
2423}
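// Hedged example of the call this produces for a namespace-scope 'Widget w;'
// ('Widget' is a hypothetical type):
//   __cxa_atexit((void (*)(void *))&dtor, (void *)&w, &__dso_handle);
// For a C++11 'thread_local' variable the registration goes through
// __cxa_thread_atexit (or _tlv_atexit on Darwin) instead, so the destructor
// runs at thread exit rather than at process exit.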
2424
2425void CodeGenModule::registerGlobalDtorsWithAtExit() {
2426  for (const auto &I : DtorsUsingAtExit) {
2427    int Priority = I.first;
2428    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
2429
2430    // Create a function that registers destructors that have the same priority.
2431    //
2432    // Since constructor functions are run in non-descending order of their
2433    // priorities, destructors are registered in non-descending order of their
2434    // priorities, and since destructor functions are run in the reverse order
2435    // of their registration, destructor functions are run in non-ascending
2436    // order of their priorities.
2437    CodeGenFunction CGF(*this);
2438    std::string GlobalInitFnName =
2439        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
2440    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
2441    llvm::Function *GlobalInitFn = CreateGlobalInitOrDestructFunction(
2442        FTy, GlobalInitFnName, getTypes().arrangeNullaryFunction(),
2443        SourceLocation());
2444    ASTContext &Ctx = getContext();
2445    QualType ReturnTy = Ctx.VoidTy;
2446    QualType FunctionTy = Ctx.getFunctionType(ReturnTy, llvm::None, {});
2447    FunctionDecl *FD = FunctionDecl::Create(
2448        Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
2449        &Ctx.Idents.get(GlobalInitFnName), FunctionTy, nullptr, SC_Static,
2450        false, false);
2451    CGF.StartFunction(GlobalDecl(FD), ReturnTy, GlobalInitFn,
2452                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
2453                      SourceLocation(), SourceLocation());
2454
2455    for (auto *Dtor : Dtors) {
2456      // Register the destructor function calling __cxa_atexit if it is
2457      // available. Otherwise fall back on calling atexit.
2458      if (getCodeGenOpts().CXAAtExit)
2459        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
2460      else
2461        CGF.registerGlobalDtorWithAtExit(Dtor);
2462    }
2463
2464    CGF.FinishFunction();
2465    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
2466  }
2467}
2468
2469/// Register a global destructor as best as we know how.
2470void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2471                                       llvm::FunctionCallee dtor,
2472                                       llvm::Constant *addr) {
2473  if (D.isNoDestroy(CGM.getContext()))
2474    return;
2475
2476  // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2477  // or __cxa_atexit depending on whether this VarDecl has thread-local storage
2478  // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2479  // We can always use __cxa_thread_atexit.
2480  if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2481    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2482
2483  // In Apple kexts, we want to add a global destructor entry.
2484  // FIXME: shouldn't this be guarded by some variable?
2485  if (CGM.getLangOpts().AppleKext) {
2486    // Generate a global destructor entry.
2487    return CGM.AddCXXDtorEntry(dtor, addr);
2488  }
2489
2490  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2491}
2492
2493static bool isThreadWrapperReplaceable(const VarDecl *VD,
2494                                       CodeGen::CodeGenModule &CGM) {
2495  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2496  // Darwin prefers references to thread-local variables to go through
2497  // the thread wrapper instead of directly referencing the backing variable.
2498  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2499         CGM.getTarget().getTriple().isOSDarwin();
2500}
2501
2502/// Get the appropriate linkage for the wrapper function. This is essentially
2503/// the weak form of the variable's linkage; every translation unit which needs
2504/// the wrapper emits a copy, and we want the linker to merge them.
2505static llvm::GlobalValue::LinkageTypes
2506getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2507  llvm::GlobalValue::LinkageTypes VarLinkage =
2508      CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2509
2510  // For internal linkage variables, we don't need an external or weak wrapper.
2511  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2512    return VarLinkage;
2513
2514  // If the thread wrapper is replaceable, give it appropriate linkage.
2515  if (isThreadWrapperReplaceable(VD, CGM))
2516    if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2517        !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2518      return VarLinkage;
2519  return llvm::GlobalValue::WeakODRLinkage;
2520}
2521
2522llvm::Function *
2523ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
2524                                             llvm::Value *Val) {
2525  // Mangle the name for the thread_local wrapper function.
2526  SmallString<256> WrapperName;
2527  {
2528    llvm::raw_svector_ostream Out(WrapperName);
2529    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
2530  }
2531
2532  // FIXME: If VD is a definition, we should regenerate the function attributes
2533  // before returning.
2534  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
2535    return cast<llvm::Function>(V);
2536
2537  QualType RetQT = VD->getType();
2538  if (RetQT->isReferenceType())
2539    RetQT = RetQT.getNonReferenceType();
2540
2541  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
2542      getContext().getPointerType(RetQT), FunctionArgList());
2543
2544  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
2545  llvm::Function *Wrapper =
2546      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
2547                             WrapperName.str(), &CGM.getModule());
2548
2549  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
2550    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
2551
2552  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
2553
2554  // Always resolve references to the wrapper at link time.
2555  if (!Wrapper->hasLocalLinkage())
2556    if (!isThreadWrapperReplaceable(VD, CGM) ||
2557        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
2558        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
2559        VD->getVisibility() == HiddenVisibility)
2560      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
2561
2562  if (isThreadWrapperReplaceable(VD, CGM)) {
2563    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2564    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
2565  }
2566
2567  ThreadWrappers.push_back({VD, Wrapper});
2568  return Wrapper;
2569}
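// Conceptually, every use of a non-local thread_local variable calls the
// wrapper created here rather than touching the variable directly.  A hedged
// sketch of what the wrapper body will look like once it is filled in by
// EmitThreadLocalInitFuncs below (for a variable 'var'):
//   T *wrapper() {                       // mangled as _ZTW..., usually weak_odr
//     if (init function exists) call it; // e.g. the _ZTH... init or __tls_init
//     return &var;
//   }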
2570
2571void ItaniumCXXABI::EmitThreadLocalInitFuncs(
2572    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
2573    ArrayRef<llvm::Function *> CXXThreadLocalInits,
2574    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
2575  llvm::Function *InitFunc = nullptr;
2576
2577  // Separate initializers into those with ordered (or partially-ordered)
2578  // initialization and those with unordered initialization.
2579  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
2580  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
2581  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
2582    if (isTemplateInstantiation(
2583            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
2584      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
2585          CXXThreadLocalInits[I];
2586    else
2587      OrderedInits.push_back(CXXThreadLocalInits[I]);
2588  }
2589
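  // If there are ordered initializers, the generated __tls_init is, roughly
  // (sketch only):
  //
  //   static thread_local bool __tls_guard;
  //   void __tls_init() {
  //     if (__tls_guard) return;
  //     __tls_guard = true;
  //     <run the ordered dynamic initializers in declaration order>;
  //   }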
2590  if (!OrderedInits.empty()) {
2591    // Generate a guarded initialization function.
2592    llvm::FunctionType *FTy =
2593        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
2594    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2595    InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init", FI,
2596                                                      SourceLocation(),
2597                                                      /*TLS=*/true);
2598    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
2599        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2600        llvm::GlobalVariable::InternalLinkage,
2601        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
2602    Guard->setThreadLocal(true);
2603
2604    CharUnits GuardAlign = CharUnits::One();
2605    Guard->setAlignment(GuardAlign.getAsAlign());
2606
2607    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
2608        InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
2609    // On Darwin platforms, use CXX_FAST_TLS calling convention.
2610    if (CGM.getTarget().getTriple().isOSDarwin()) {
2611      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2612      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
2613    }
2614  }
2615
2616  // Create declarations for thread wrappers for all thread-local variables
2617  // with non-discardable definitions in this translation unit.
2618  for (const VarDecl *VD : CXXThreadLocals) {
2619    if (VD->hasDefinition() &&
2620        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
2621      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
2622      getOrCreateThreadLocalWrapper(VD, GV);
2623    }
2624  }
2625
2626  // Emit all referenced thread wrappers.
2627  for (auto VDAndWrapper : ThreadWrappers) {
2628    const VarDecl *VD = VDAndWrapper.first;
2629    llvm::GlobalVariable *Var =
2630        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
2631    llvm::Function *Wrapper = VDAndWrapper.second;
2632
2633    // Some targets require that all access to thread-local variables goes through
2634    // the thread wrapper.  On such targets, if this TU does not define the variable,
2635    // we cannot emit the wrapper body here; leave the wrapper as a declaration.
2636    if (!VD->hasDefinition()) {
2637      if (isThreadWrapperReplaceable(VD, CGM)) {
2638        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
2639        continue;
2640      }
2641
2642      // If this isn't a TU in which this variable is defined, the thread
2643      // wrapper is discardable.
2644      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
2645        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
2646    }
2647
2648    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);
2649
2650    // Mangle the name for the thread_local initialization function.
2651    SmallString<256> InitFnName;
2652    {
2653      llvm::raw_svector_ostream Out(InitFnName);
2654      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
2655    }
2656
2657    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);
2658
2659    // If we have a definition for the variable, emit the initialization
2660    // function as an alias to the global Init function (if any). Otherwise,
2661    // produce a declaration of the initialization function.
2662    llvm::GlobalValue *Init = nullptr;
2663    bool InitIsInitFunc = false;
2664    bool HasConstantInitialization = false;
2665    if (!usesThreadWrapperFunction(VD)) {
2666      HasConstantInitialization = true;
2667    } else if (VD->hasDefinition()) {
2668      InitIsInitFunc = true;
2669      llvm::Function *InitFuncToUse = InitFunc;
2670      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
2671        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
2672      if (InitFuncToUse)
2673        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
2674                                         InitFuncToUse);
2675    } else {
2676      // Emit a weak global function referring to the initialization function.
2677      // This function will not exist if the TU defining the thread_local
2678      // variable in question does not need any dynamic initialization for
2679      // its thread_local variables.
2680      Init = llvm::Function::Create(InitFnTy,
2681                                    llvm::GlobalVariable::ExternalWeakLinkage,
2682                                    InitFnName.str(), &CGM.getModule());
2683      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
2684      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
2685                                    cast<llvm::Function>(Init));
2686    }
2687
2688    if (Init) {
2689      Init->setVisibility(Var->getVisibility());
2690      // Don't mark an extern_weak function DSO local on windows.
2691      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
2692        Init->setDSOLocal(Var->isDSOLocal());
2693    }
2694
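    // The wrapper body emitted below amounts to, roughly (sketch only):
    //
    //   T *_ZTW<var>() {
    //     _ZTH<var>();     // unconditionally, guarded by a null check, or omitted
    //                      // entirely when the initialization is constant
    //     return &<var>;   // for references, the address of the referenced object
    //   }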
2695    llvm::LLVMContext &Context = CGM.getModule().getContext();
2696    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
2697    CGBuilderTy Builder(CGM, Entry);
2698    if (HasConstantInitialization) {
2699      // No dynamic initialization to invoke.
2700    } else if (InitIsInitFunc) {
2701      if (Init) {
2702        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
2703        if (isThreadWrapperReplaceable(VD, CGM)) {
2704          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2705          llvm::Function *Fn =
2706              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
2707          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
2708        }
2709      }
2710    } else {
2711      // Don't know whether we have an init function. Call it if it exists.
2712      llvm::Value *Have = Builder.CreateIsNotNull(Init);
2713      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2714      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
2715      Builder.CreateCondBr(Have, InitBB, ExitBB);
2716
2717      Builder.SetInsertPoint(InitBB);
2718      Builder.CreateCall(InitFnTy, Init);
2719      Builder.CreateBr(ExitBB);
2720
2721      Builder.SetInsertPoint(ExitBB);
2722    }
2723
2724    // For a reference, the result of the wrapper function is a pointer to
2725    // the referenced object.
2726    llvm::Value *Val = Var;
2727    if (VD->getType()->isReferenceType()) {
2728      CharUnits Align = CGM.getContext().getDeclAlign(VD);
2729      Val = Builder.CreateAlignedLoad(Val, Align);
2730    }
2731    if (Val->getType() != Wrapper->getReturnType())
2732      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
2733          Val, Wrapper->getReturnType(), "");
2734    Builder.CreateRet(Val);
2735  }
2736}
2737
2738LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
2739                                                   const VarDecl *VD,
2740                                                   QualType LValType) {
2741  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
2742  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
2743
2744  llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
2745  CallVal->setCallingConv(Wrapper->getCallingConv());
2746
2747  LValue LV;
2748  if (VD->getType()->isReferenceType())
2749    LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
2750  else
2751    LV = CGF.MakeAddrLValue(CallVal, LValType,
2752                            CGF.getContext().getDeclAlign(VD));
2753  // FIXME: need setObjCGCLValueClass?
2754  return LV;
2755}
2756
2757/// Return whether the given global decl needs a VTT parameter, which it does
2758/// if it's a base constructor or destructor with virtual bases.
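/// For example (illustrative): given
///   struct A { };
///   struct B : virtual A { B(); };
/// the base-object (C2) constructor of B takes an implicit VTT parameter,
/// while the complete-object (C1) constructor does not.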
2759bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
2760  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
2761
2762  // If we don't have any virtual bases, just return early.
2763  if (!MD->getParent()->getNumVBases())
2764    return false;
2765
2766  // Check if we have a base constructor.
2767  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
2768    return true;
2769
2770  // Check if we have a base destructor.
2771  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
2772    return true;
2773
2774  return false;
2775}
2776
2777namespace {
2778class ItaniumRTTIBuilder {
2779  CodeGenModule &CGM;  // Per-module state.
2780  llvm::LLVMContext &VMContext;
2781  const ItaniumCXXABI &CXXABI;  // Per-module state.
2782
2783  /// Fields - The fields of the RTTI descriptor currently being built.
2784  SmallVector<llvm::Constant *, 16> Fields;
2785
2786  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
2787  llvm::GlobalVariable *
2788  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
2789
2790  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
2791  /// descriptor of the given type.
2792  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
2793
2794  /// BuildVTablePointer - Build the vtable pointer for the given type.
2795  void BuildVTablePointer(const Type *Ty);
2796
2797  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
2798  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
2799  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
2800
2801  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
2802  /// classes with bases that do not satisfy the abi::__si_class_type_info
2803/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
2804  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
2805
2806  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
2807  /// for pointer types.
2808  void BuildPointerTypeInfo(QualType PointeeTy);
2809
2810  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
2811  /// type_info for an object type.
2812  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
2813
2814  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
2815  /// struct, used for member pointer types.
2816  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
2817
2818public:
2819  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
2820      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
2821
2822  // Pointer type info flags.
2823  enum {
2824    /// PTI_Const - Type has const qualifier.
2825    PTI_Const = 0x1,
2826
2827    /// PTI_Volatile - Type has volatile qualifier.
2828    PTI_Volatile = 0x2,
2829
2830    /// PTI_Restrict - Type has restrict qualifier.
2831    PTI_Restrict = 0x4,
2832
2833    /// PTI_Incomplete - Type is incomplete.
2834    PTI_Incomplete = 0x8,
2835
2836    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
2837    /// (in pointer to member).
2838    PTI_ContainingClassIncomplete = 0x10,
2839
2840    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
2841    //PTI_TransactionSafe = 0x20,
2842
2843    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
2844    PTI_Noexcept = 0x40,
2845  };
2846
2847  // VMI type info flags.
2848  enum {
2849    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
2850    VMI_NonDiamondRepeat = 0x1,
2851
2852    /// VMI_DiamondShaped - Class is diamond shaped.
2853    VMI_DiamondShaped = 0x2
2854  };
2855
2856  // Base class type info flags.
2857  enum {
2858    /// BCTI_Virtual - Base class is virtual.
2859    BCTI_Virtual = 0x1,
2860
2861    /// BCTI_Public - Base class is public.
2862    BCTI_Public = 0x2
2863  };
2864
2865  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
2866  /// link to an existing RTTI descriptor if one already exists.
2867  llvm::Constant *BuildTypeInfo(QualType Ty);
2868
2869  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
2870  llvm::Constant *BuildTypeInfo(
2871      QualType Ty,
2872      llvm::GlobalVariable::LinkageTypes Linkage,
2873      llvm::GlobalValue::VisibilityTypes Visibility,
2874      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
2875};
2876}
2877
2878llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
2879    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
2880  SmallString<256> Name;
2881  llvm::raw_svector_ostream Out(Name);
2882  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
2883
2884  // The mangled name of the typename is the mangled name of the type prefixed
2885  // with "_ZTS", so the type's mangled name starts at index 4 and we can simply
2886  // index into the typename to get it.
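  // For example (illustrative): for the type "int" the typename symbol is
  // _ZTSi and its initializer is the string "i".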
2887  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
2888                                                            Name.substr(4));
2889  auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
2890
2891  llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
2892      Name, Init->getType(), Linkage, Align.getQuantity());
2893
2894  GV->setInitializer(Init);
2895
2896  return GV;
2897}
2898
2899llvm::Constant *
2900ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
2901  // Mangle the RTTI name.
2902  SmallString<256> Name;
2903  llvm::raw_svector_ostream Out(Name);
2904  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
2905
2906  // Look for an existing global.
2907  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
2908
2909  if (!GV) {
2910    // Create a new global variable.
2911    // Note for the future: If we would ever like to do deferred emission of
2912    // RTTI, check whether emitting vtables opportunistically needs any adjustment.
2913
2914    GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
2915                                  /*isConstant=*/true,
2916                                  llvm::GlobalValue::ExternalLinkage, nullptr,
2917                                  Name);
2918    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
2919    CGM.setGVProperties(GV, RD);
2920  }
2921
2922  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
2923}
2924
2925/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
2926/// info for that type is defined in the standard library.
2927static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
2928  // Itanium C++ ABI 2.9.2:
2929  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
2930  //   the run-time support library. Specifically, the run-time support
2931  //   library should contain type_info objects for the types X, X* and
2932  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
2933  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
2934  //   long, unsigned long, long long, unsigned long long, float, double,
2935  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
2936  //   half-precision floating point types.
2937  //
2938  // GCC also emits RTTI for __int128.
2939  // FIXME: We do not emit RTTI information for decimal types here.
2940
2941  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
2942  switch (Ty->getKind()) {
2943    case BuiltinType::Void:
2944    case BuiltinType::NullPtr:
2945    case BuiltinType::Bool:
2946    case BuiltinType::WChar_S:
2947    case BuiltinType::WChar_U:
2948    case BuiltinType::Char_U:
2949    case BuiltinType::Char_S:
2950    case BuiltinType::UChar:
2951    case BuiltinType::SChar:
2952    case BuiltinType::Short:
2953    case BuiltinType::UShort:
2954    case BuiltinType::Int:
2955    case BuiltinType::UInt:
2956    case BuiltinType::Long:
2957    case BuiltinType::ULong:
2958    case BuiltinType::LongLong:
2959    case BuiltinType::ULongLong:
2960    case BuiltinType::Half:
2961    case BuiltinType::Float:
2962    case BuiltinType::Double:
2963    case BuiltinType::LongDouble:
2964    case BuiltinType::Float16:
2965    case BuiltinType::Float128:
2966    case BuiltinType::Char8:
2967    case BuiltinType::Char16:
2968    case BuiltinType::Char32:
2969    case BuiltinType::Int128:
2970    case BuiltinType::UInt128:
2971      return true;
2972
2973#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2974    case BuiltinType::Id:
2975#include "clang/Basic/OpenCLImageTypes.def"
2976#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2977    case BuiltinType::Id:
2978#include "clang/Basic/OpenCLExtensionTypes.def"
2979    case BuiltinType::OCLSampler:
2980    case BuiltinType::OCLEvent:
2981    case BuiltinType::OCLClkEvent:
2982    case BuiltinType::OCLQueue:
2983    case BuiltinType::OCLReserveID:
2984#define SVE_TYPE(Name, Id, SingletonId) \
2985    case BuiltinType::Id:
2986#include "clang/Basic/AArch64SVEACLETypes.def"
2987    case BuiltinType::ShortAccum:
2988    case BuiltinType::Accum:
2989    case BuiltinType::LongAccum:
2990    case BuiltinType::UShortAccum:
2991    case BuiltinType::UAccum:
2992    case BuiltinType::ULongAccum:
2993    case BuiltinType::ShortFract:
2994    case BuiltinType::Fract:
2995    case BuiltinType::LongFract:
2996    case BuiltinType::UShortFract:
2997    case BuiltinType::UFract:
2998    case BuiltinType::ULongFract:
2999    case BuiltinType::SatShortAccum:
3000    case BuiltinType::SatAccum:
3001    case BuiltinType::SatLongAccum:
3002    case BuiltinType::SatUShortAccum:
3003    case BuiltinType::SatUAccum:
3004    case BuiltinType::SatULongAccum:
3005    case BuiltinType::SatShortFract:
3006    case BuiltinType::SatFract:
3007    case BuiltinType::SatLongFract:
3008    case BuiltinType::SatUShortFract:
3009    case BuiltinType::SatUFract:
3010    case BuiltinType::SatULongFract:
3011      return false;
3012
3013    case BuiltinType::Dependent:
3014#define BUILTIN_TYPE(Id, SingletonId)
3015#define PLACEHOLDER_TYPE(Id, SingletonId) \
3016    case BuiltinType::Id:
3017#include "clang/AST/BuiltinTypes.def"
3018      llvm_unreachable("asking for RTTI for a placeholder type!");
3019
3020    case BuiltinType::ObjCId:
3021    case BuiltinType::ObjCClass:
3022    case BuiltinType::ObjCSel:
3023      llvm_unreachable("FIXME: Objective-C types are unsupported!");
3024  }
3025
3026  llvm_unreachable("Invalid BuiltinType Kind!");
3027}
3028
3029static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3030  QualType PointeeTy = PointerTy->getPointeeType();
3031  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3032  if (!BuiltinTy)
3033    return false;
3034
3035  // Check the qualifiers.
3036  Qualifiers Quals = PointeeTy.getQualifiers();
3037  Quals.removeConst();
3038
3039  if (!Quals.empty())
3040    return false;
3041
3042  return TypeInfoIsInStandardLibrary(BuiltinTy);
3043}
3044
3045/// IsStandardLibraryRTTIDescriptor - Returns whether the type
3046/// information for the given type exists in the standard library.
3047static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3048  // Type info for builtin types is defined in the standard library.
3049  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3050    return TypeInfoIsInStandardLibrary(BuiltinTy);
3051
3052  // Type info for some pointer types to builtin types is defined in the
3053  // standard library.
3054  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3055    return TypeInfoIsInStandardLibrary(PointerTy);
3056
3057  return false;
3058}
3059
3060/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3061/// the given type exists somewhere else, and thus we should not emit the type
3062/// information in this translation unit.  Assumes that it is not a
3063/// standard-library type.
3064static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3065                                            QualType Ty) {
3066  ASTContext &Context = CGM.getContext();
3067
3068  // If RTTI is disabled, assume it might be disabled in the
3069  // translation unit that defines any potential key function, too.
3070  if (!Context.getLangOpts().RTTI) return false;
3071
3072  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3073    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3074    if (!RD->hasDefinition())
3075      return false;
3076
3077    if (!RD->isDynamicClass())
3078      return false;
3079
3080    // FIXME: this may need to be reconsidered if the key function
3081    // changes.
3082    // N.B. We must always emit the RTTI data ourselves if there exists a key
3083    // function.
3084    bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3085
3086    // Don't import the RTTI but emit it locally.
3087    if (CGM.getTriple().isWindowsGNUEnvironment())
3088      return false;
3089
3090    if (CGM.getVTables().isVTableExternal(RD))
3091      return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3092                 ? false
3093                 : true;
3094
3095    if (IsDLLImport)
3096      return true;
3097  }
3098
3099  return false;
3100}
3101
3102/// IsIncompleteClassType - Returns whether the given record type is incomplete.
3103static bool IsIncompleteClassType(const RecordType *RecordTy) {
3104  return !RecordTy->getDecl()->isCompleteDefinition();
3105}
3106
3107/// ContainsIncompleteClassType - Returns whether the given type contains an
3108/// incomplete class type. This is true if
3109///
3110///   * The given type is an incomplete class type.
3111///   * The given type is a pointer type whose pointee type contains an
3112///     incomplete class type.
3113///   * The given type is a member pointer type whose class is an incomplete
3114///     class type.
3115///   * The given type is a member pointer type whose pointee type contains an
3116///     incomplete class type.
3117/// In short: it is a direct or indirect pointer to an incomplete class type.
3118static bool ContainsIncompleteClassType(QualType Ty) {
3119  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3120    if (IsIncompleteClassType(RecordTy))
3121      return true;
3122  }
3123
3124  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3125    return ContainsIncompleteClassType(PointerTy->getPointeeType());
3126
3127  if (const MemberPointerType *MemberPointerTy =
3128      dyn_cast<MemberPointerType>(Ty)) {
3129    // Check if the class type is incomplete.
3130    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3131    if (IsIncompleteClassType(ClassType))
3132      return true;
3133
3134    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3135  }
3136
3137  return false;
3138}
3139
3140// CanUseSingleInheritance - Return whether the given record decl has a "single,
3141// public, non-virtual base at offset zero (i.e. the derived class is dynamic
3142// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
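// For example (illustrative), "struct B : A { };" with A a public, non-virtual
// base qualifies for abi::__si_class_type_info, whereas virtual or non-public
// inheritance does not.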
3143static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3144  // Check the number of bases.
3145  if (RD->getNumBases() != 1)
3146    return false;
3147
3148  // Get the base.
3149  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3150
3151  // Check that the base is not virtual.
3152  if (Base->isVirtual())
3153    return false;
3154
3155  // Check that the base is public.
3156  if (Base->getAccessSpecifier() != AS_public)
3157    return false;
3158
3159  // Check that the class is dynamic iff the base is.
3160  auto *BaseDecl =
3161      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3162  if (!BaseDecl->isEmpty() &&
3163      BaseDecl->isDynamicClass() != RD->isDynamicClass())
3164    return false;
3165
3166  return true;
3167}
3168
3169void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
3170  // abi::__class_type_info.
3171  static const char * const ClassTypeInfo =
3172    "_ZTVN10__cxxabiv117__class_type_infoE";
3173  // abi::__si_class_type_info.
3174  static const char * const SIClassTypeInfo =
3175    "_ZTVN10__cxxabiv120__si_class_type_infoE";
3176  // abi::__vmi_class_type_info.
3177  static const char * const VMIClassTypeInfo =
3178    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
3179
3180  const char *VTableName = nullptr;
3181
3182  switch (Ty->getTypeClass()) {
3183#define TYPE(Class, Base)
3184#define ABSTRACT_TYPE(Class, Base)
3185#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3186#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3187#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3188#include "clang/AST/TypeNodes.inc"
3189    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3190
3191  case Type::LValueReference:
3192  case Type::RValueReference:
3193    llvm_unreachable("References shouldn't get here");
3194
3195  case Type::Auto:
3196  case Type::DeducedTemplateSpecialization:
3197    llvm_unreachable("Undeduced type shouldn't get here");
3198
3199  case Type::Pipe:
3200    llvm_unreachable("Pipe types shouldn't get here");
3201
3202  case Type::Builtin:
3203  // GCC treats vector and complex types as fundamental types.
3204  case Type::Vector:
3205  case Type::ExtVector:
3206  case Type::Complex:
3207  case Type::Atomic:
3208  // FIXME: GCC treats block pointers as fundamental types?!
3209  case Type::BlockPointer:
3210    // abi::__fundamental_type_info.
3211    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
3212    break;
3213
3214  case Type::ConstantArray:
3215  case Type::IncompleteArray:
3216  case Type::VariableArray:
3217    // abi::__array_type_info.
3218    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
3219    break;
3220
3221  case Type::FunctionNoProto:
3222  case Type::FunctionProto:
3223    // abi::__function_type_info.
3224    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
3225    break;
3226
3227  case Type::Enum:
3228    // abi::__enum_type_info.
3229    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
3230    break;
3231
3232  case Type::Record: {
3233    const CXXRecordDecl *RD =
3234      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3235
3236    if (!RD->hasDefinition() || !RD->getNumBases()) {
3237      VTableName = ClassTypeInfo;
3238    } else if (CanUseSingleInheritance(RD)) {
3239      VTableName = SIClassTypeInfo;
3240    } else {
3241      VTableName = VMIClassTypeInfo;
3242    }
3243
3244    break;
3245  }
3246
3247  case Type::ObjCObject:
3248    // Ignore protocol qualifiers.
3249    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
3250
3251    // Handle id and Class.
3252    if (isa<BuiltinType>(Ty)) {
3253      VTableName = ClassTypeInfo;
3254      break;
3255    }
3256
3257    assert(isa<ObjCInterfaceType>(Ty));
3258    LLVM_FALLTHROUGH;
3259
3260  case Type::ObjCInterface:
3261    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
3262      VTableName = SIClassTypeInfo;
3263    } else {
3264      VTableName = ClassTypeInfo;
3265    }
3266    break;
3267
3268  case Type::ObjCObjectPointer:
3269  case Type::Pointer:
3270    // abi::__pointer_type_info.
3271    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
3272    break;
3273
3274  case Type::MemberPointer:
3275    // abi::__pointer_to_member_type_info.
3276    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
3277    break;
3278  }
3279
3280  llvm::Constant *VTable =
3281    CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
3282  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3283
3284  llvm::Type *PtrDiffTy =
3285    CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3286
3287  // The vtable address point is 2: two pointers in, past the offset-to-top and RTTI fields.
3288  llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3289  VTable =
3290      llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two);
3291  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
3292
3293  Fields.push_back(VTable);
3294}
3295
3296/// Return the linkage that the type info and type info name constants
3297/// should have for the given type.
3298static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3299                                                             QualType Ty) {
3300  // Itanium C++ ABI 2.9.5p7:
3301  //   In addition, it and all of the intermediate abi::__pointer_type_info
3302  //   structs in the chain down to the abi::__class_type_info for the
3303  //   incomplete class type must be prevented from resolving to the
3304  //   corresponding type_info structs for the complete class type, possibly
3305  //   by making them local static objects. Finally, a dummy class RTTI is
3306  //   generated for the incomplete type that will not resolve to the final
3307  //   complete class RTTI (because the latter need not exist), possibly by
3308  //   making it a local static object.
3309  if (ContainsIncompleteClassType(Ty))
3310    return llvm::GlobalValue::InternalLinkage;
3311
3312  switch (Ty->getLinkage()) {
3313  case NoLinkage:
3314  case InternalLinkage:
3315  case UniqueExternalLinkage:
3316    return llvm::GlobalValue::InternalLinkage;
3317
3318  case VisibleNoLinkage:
3319  case ModuleInternalLinkage:
3320  case ModuleLinkage:
3321  case ExternalLinkage:
3322    // If RTTI is not enabled, this type info struct can only be used for
3323    // exception handling. Give it linkonce_odr linkage.
3324    if (!CGM.getLangOpts().RTTI)
3325      return llvm::GlobalValue::LinkOnceODRLinkage;
3326
3327    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3328      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3329      if (RD->hasAttr<WeakAttr>())
3330        return llvm::GlobalValue::WeakODRLinkage;
3331      if (CGM.getTriple().isWindowsItaniumEnvironment())
3332        if (RD->hasAttr<DLLImportAttr>() &&
3333            ShouldUseExternalRTTIDescriptor(CGM, Ty))
3334          return llvm::GlobalValue::ExternalLinkage;
3335      // MinGW always uses LinkOnceODRLinkage for type info.
3336      if (RD->isDynamicClass() &&
3337          !CGM.getContext()
3338               .getTargetInfo()
3339               .getTriple()
3340               .isWindowsGNUEnvironment())
3341        return CGM.getVTableLinkage(RD);
3342    }
3343
3344    return llvm::GlobalValue::LinkOnceODRLinkage;
3345  }
3346
3347  llvm_unreachable("Invalid linkage!");
3348}
3349
3350llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3351  // We want to operate on the canonical type.
3352  Ty = Ty.getCanonicalType();
3353
3354  // Check if we've already emitted an RTTI descriptor for this type.
3355  SmallString<256> Name;
3356  llvm::raw_svector_ostream Out(Name);
3357  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3358
3359  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3360  if (OldGV && !OldGV->isDeclaration()) {
3361    assert(!OldGV->hasAvailableExternallyLinkage() &&
3362           "available_externally typeinfos not yet implemented");
3363
3364    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3365  }
3366
3367  // Check if there is already an external RTTI descriptor for this type.
3368  if (IsStandardLibraryRTTIDescriptor(Ty) ||
3369      ShouldUseExternalRTTIDescriptor(CGM, Ty))
3370    return GetAddrOfExternalRTTIDescriptor(Ty);
3371
3372  // Determine the linkage to give the type_info object and its name.
3373  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3374
3375  // Give the type_info object and name the formal visibility of the
3376  // type itself.
3377  llvm::GlobalValue::VisibilityTypes llvmVisibility;
3378  if (llvm::GlobalValue::isLocalLinkage(Linkage))
3379    // If the linkage is local, only default visibility makes sense.
3380    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3381  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3382           ItaniumCXXABI::RUK_NonUniqueHidden)
3383    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3384  else
3385    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3386
3387  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3388      llvm::GlobalValue::DefaultStorageClass;
3389  if (CGM.getTriple().isWindowsItaniumEnvironment()) {
3390    auto RD = Ty->getAsCXXRecordDecl();
3391    if (RD && RD->hasAttr<DLLExportAttr>())
3392      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3393  }
3394
3395  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3396}
3397
3398llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
3399      QualType Ty,
3400      llvm::GlobalVariable::LinkageTypes Linkage,
3401      llvm::GlobalValue::VisibilityTypes Visibility,
3402      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
3403  // Add the vtable pointer.
3404  BuildVTablePointer(cast<Type>(Ty));
3405
3406  // And the name.
3407  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
3408  llvm::Constant *TypeNameField;
3409
3410  // If we're supposed to demote the visibility, be sure to set a flag
3411  // to use a string comparison for type_info comparisons.
3412  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
3413      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
3414  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
3415    // The flag is the sign bit, which on ARM64 is defined to be clear
3416    // for global pointers.  This is very ARM64-specific.
3417    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
3418    llvm::Constant *flag =
3419        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
3420    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
3421    TypeNameField =
3422        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
3423  } else {
3424    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
3425  }
3426  Fields.push_back(TypeNameField);
3427
3428  switch (Ty->getTypeClass()) {
3429#define TYPE(Class, Base)
3430#define ABSTRACT_TYPE(Class, Base)
3431#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
3432#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3433#define DEPENDENT_TYPE(Class, Base) case Type::Class:
3434#include "clang/AST/TypeNodes.inc"
3435    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
3436
3437  // GCC treats vector types as fundamental types.
3438  case Type::Builtin:
3439  case Type::Vector:
3440  case Type::ExtVector:
3441  case Type::Complex:
3442  case Type::BlockPointer:
3443    // Itanium C++ ABI 2.9.5p4:
3444    // abi::__fundamental_type_info adds no data members to std::type_info.
3445    break;
3446
3447  case Type::LValueReference:
3448  case Type::RValueReference:
3449    llvm_unreachable("References shouldn't get here");
3450
3451  case Type::Auto:
3452  case Type::DeducedTemplateSpecialization:
3453    llvm_unreachable("Undeduced type shouldn't get here");
3454
3455  case Type::Pipe:
3456    llvm_unreachable("Pipe type shouldn't get here");
3457
3458  case Type::ConstantArray:
3459  case Type::IncompleteArray:
3460  case Type::VariableArray:
3461    // Itanium C++ ABI 2.9.5p5:
3462    // abi::__array_type_info adds no data members to std::type_info.
3463    break;
3464
3465  case Type::FunctionNoProto:
3466  case Type::FunctionProto:
3467    // Itanium C++ ABI 2.9.5p5:
3468    // abi::__function_type_info adds no data members to std::type_info.
3469    break;
3470
3471  case Type::Enum:
3472    // Itanium C++ ABI 2.9.5p5:
3473    // abi::__enum_type_info adds no data members to std::type_info.
3474    break;
3475
3476  case Type::Record: {
3477    const CXXRecordDecl *RD =
3478      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
3479    if (!RD->hasDefinition() || !RD->getNumBases()) {
3480      // We don't need to emit any fields.
3481      break;
3482    }
3483
3484    if (CanUseSingleInheritance(RD))
3485      BuildSIClassTypeInfo(RD);
3486    else
3487      BuildVMIClassTypeInfo(RD);
3488
3489    break;
3490  }
3491
3492  case Type::ObjCObject:
3493  case Type::ObjCInterface:
3494    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
3495    break;
3496
3497  case Type::ObjCObjectPointer:
3498    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
3499    break;
3500
3501  case Type::Pointer:
3502    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
3503    break;
3504
3505  case Type::MemberPointer:
3506    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
3507    break;
3508
3509  case Type::Atomic:
3510    // No fields, at least for the moment.
3511    break;
3512  }
3513
3514  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
3515
3516  SmallString<256> Name;
3517  llvm::raw_svector_ostream Out(Name);
3518  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3519  llvm::Module &M = CGM.getModule();
3520  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
3521  llvm::GlobalVariable *GV =
3522      new llvm::GlobalVariable(M, Init->getType(),
3523                               /*isConstant=*/true, Linkage, Init, Name);
3524
3525  // If there's already an old global variable, replace it with the new one.
3526  if (OldGV) {
3527    GV->takeName(OldGV);
3528    llvm::Constant *NewPtr =
3529      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
3530    OldGV->replaceAllUsesWith(NewPtr);
3531    OldGV->eraseFromParent();
3532  }
3533
3534  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
3535    GV->setComdat(M.getOrInsertComdat(GV->getName()));
3536
3537  CharUnits Align =
3538      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
3539  GV->setAlignment(Align.getAsAlign());
3540
3541  // The Itanium ABI specifies that type_info objects must be globally
3542  // unique, with one exception: if the type is an incomplete class
3543  // type or a (possibly indirect) pointer to one.  That exception
3544  // affects the general case of comparing type_info objects produced
3545  // by the typeid operator, which is why the comparison operators on
3546  // std::type_info generally use the type_info name pointers instead
3547  // of the object addresses.  However, the language's built-in uses
3548  // of RTTI generally require class types to be complete, even when
3549  // manipulating pointers to those class types.  This allows the
3550  // implementation of dynamic_cast to rely on address equality tests,
3551  // which is much faster.
3552
3553  // All of this is to say that it's important that both the type_info
3554  // object and the type_info name be uniqued when weakly emitted.
3555
3556  TypeName->setVisibility(Visibility);
3557  CGM.setDSOLocal(TypeName);
3558
3559  GV->setVisibility(Visibility);
3560  CGM.setDSOLocal(GV);
3561
3562  TypeName->setDLLStorageClass(DLLStorageClass);
3563  GV->setDLLStorageClass(DLLStorageClass);
3564
3565  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3566  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
3567
3568  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3569}
3570
3571/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3572/// for the given Objective-C object type.
3573void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3574  // Drop qualifiers.
3575  const Type *T = OT->getBaseType().getTypePtr();
3576  assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3577
3578  // The builtin types are abi::__class_type_infos and don't require
3579  // extra fields.
3580  if (isa<BuiltinType>(T)) return;
3581
3582  ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3583  ObjCInterfaceDecl *Super = Class->getSuperClass();
3584
3585  // Root classes are also __class_type_info.
3586  if (!Super) return;
3587
3588  QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3589
3590  // Everything else is single inheritance.
3591  llvm::Constant *BaseTypeInfo =
3592      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3593  Fields.push_back(BaseTypeInfo);
3594}
3595
3596/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3597/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3598void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3599  // Itanium C++ ABI 2.9.5p6b:
3600  // It adds to abi::__class_type_info a single member pointing to the
3601  // type_info structure for the base type,
3602  llvm::Constant *BaseTypeInfo =
3603    ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3604  Fields.push_back(BaseTypeInfo);
3605}
3606
3607namespace {
3608  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
3609  /// a class hierarchy.
3610  struct SeenBases {
3611    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
3612    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
3613  };
3614}
3615
3616/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3617/// abi::__vmi_class_type_info.
3618///
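/// For example (illustrative): given
///   struct A {}; struct B : virtual A {}; struct C : virtual A {};
///   struct D : B, C {};
/// the virtual base A is reached twice while walking D's bases, so D's flags
/// include VMI_DiamondShaped.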
3619static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3620                                             SeenBases &Bases) {
3621
3622  unsigned Flags = 0;
3623
3624  auto *BaseDecl =
3625      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3626
3627  if (Base->isVirtual()) {
3628    // Mark the virtual base as seen.
3629    if (!Bases.VirtualBases.insert(BaseDecl).second) {
3630      // If this virtual base has been seen before, then the class is diamond
3631      // shaped.
3632      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3633    } else {
3634      if (Bases.NonVirtualBases.count(BaseDecl))
3635        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3636    }
3637  } else {
3638    // Mark the non-virtual base as seen.
3639    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3640      // If this non-virtual base has been seen before, then the class has non-
3641      // diamond shaped repeated inheritance.
3642      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3643    } else {
3644      if (Bases.VirtualBases.count(BaseDecl))
3645        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3646    }
3647  }
3648
3649  // Walk all bases.
3650  for (const auto &I : BaseDecl->bases())
3651    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3652
3653  return Flags;
3654}
3655
3656static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3657  unsigned Flags = 0;
3658  SeenBases Bases;
3659
3660  // Walk all bases.
3661  for (const auto &I : RD->bases())
3662    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3663
3664  return Flags;
3665}
3666
3667/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3668/// classes with bases that do not satisfy the abi::__si_class_type_info
3669/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3670void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3671  llvm::Type *UnsignedIntLTy =
3672    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3673
3674  // Itanium C++ ABI 2.9.5p6c:
3675  //   __flags is a word with flags describing details about the class
3676  //   structure, which may be referenced by using the __flags_masks
3677  //   enumeration. These flags refer to both direct and indirect bases.
3678  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3679  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3680
3681  // Itanium C++ ABI 2.9.5p6c:
3682  //   __base_count is a word with the number of direct proper base class
3683  //   descriptions that follow.
3684  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
3685
3686  if (!RD->getNumBases())
3687    return;
3688
3689  // Now add the base class descriptions.
3690
3691  // Itanium C++ ABI 2.9.5p6c:
3692  //   __base_info[] is an array of base class descriptions -- one for every
3693  //   direct proper base. Each description is of the type:
3694  //
3695  //   struct abi::__base_class_type_info {
3696  //   public:
3697  //     const __class_type_info *__base_type;
3698  //     long __offset_flags;
3699  //
3700  //     enum __offset_flags_masks {
3701  //       __virtual_mask = 0x1,
3702  //       __public_mask = 0x2,
3703  //       __offset_shift = 8
3704  //     };
3705  //   };
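  //
  //   For example (illustrative): a public, non-virtual base laid out at byte
  //   offset 8 is encoded as (8 << __offset_shift) | __public_mask, i.e. an
  //   __offset_flags value of 0x802.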
3706
3707  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
3708  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
3709  // LLP64 platforms.
3710  // FIXME: Consider updating libc++abi to match, and extend this logic to all
3711  // LLP64 platforms.
3712  QualType OffsetFlagsTy = CGM.getContext().LongTy;
3713  const TargetInfo &TI = CGM.getContext().getTargetInfo();
3714  if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
3715    OffsetFlagsTy = CGM.getContext().LongLongTy;
3716  llvm::Type *OffsetFlagsLTy =
3717      CGM.getTypes().ConvertType(OffsetFlagsTy);
3718
3719  for (const auto &Base : RD->bases()) {
3720    // The __base_type member points to the RTTI for the base type.
3721    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
3722
3723    auto *BaseDecl =
3724        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
3725
3726    int64_t OffsetFlags = 0;
3727
3728    // All but the lower 8 bits of __offset_flags are a signed offset.
3729    // For a non-virtual base, this is the offset in the object of the base
3730    // subobject. For a virtual base, this is the offset in the virtual table of
3731    // the virtual base offset for the virtual base referenced (negative).
3732    CharUnits Offset;
3733    if (Base.isVirtual())
3734      Offset =
3735        CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
3736    else {
3737      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
3738      Offset = Layout.getBaseClassOffset(BaseDecl);
3739    }
3740
3741    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
3742
3743    // The low-order byte of __offset_flags contains flags, as given by the
3744    // masks from the enumeration __offset_flags_masks.
3745    if (Base.isVirtual())
3746      OffsetFlags |= BCTI_Virtual;
3747    if (Base.getAccessSpecifier() == AS_public)
3748      OffsetFlags |= BCTI_Public;
3749
3750    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
3751  }
3752}
3753
3754/// Compute the flags for a __pbase_type_info, and remove the corresponding
3755/// pieces from \p Type.
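/// For example (illustrative): for a pointee type of "const volatile int",
/// this returns PTI_Const | PTI_Volatile (0x3) and leaves plain "int" in
/// \p Type.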
3756static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
3757  unsigned Flags = 0;
3758
3759  if (Type.isConstQualified())
3760    Flags |= ItaniumRTTIBuilder::PTI_Const;
3761  if (Type.isVolatileQualified())
3762    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
3763  if (Type.isRestrictQualified())
3764    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
3765  Type = Type.getUnqualifiedType();
3766
3767  // Itanium C++ ABI 2.9.5p7:
3768  //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
3769  //   incomplete class type, the incomplete target type flag is set.
3770  if (ContainsIncompleteClassType(Type))
3771    Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
3772
3773  if (auto *Proto = Type->getAs<FunctionProtoType>()) {
3774    if (Proto->isNothrow()) {
3775      Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
3776      Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
3777    }
3778  }
3779
3780  return Flags;
3781}
3782
3783/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
3784/// used for pointer types.
3785void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
3786  // Itanium C++ ABI 2.9.5p7:
3787  //   __flags is a flag word describing the cv-qualification and other
3788  //   attributes of the type pointed to
3789  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3790
3791  llvm::Type *UnsignedIntLTy =
3792    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3793  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3794
3795  // Itanium C++ ABI 2.9.5p7:
3796  //  __pointee is a pointer to the std::type_info derivation for the
3797  //  unqualified type being pointed to.
3798  llvm::Constant *PointeeTypeInfo =
3799      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3800  Fields.push_back(PointeeTypeInfo);
3801}
3802
3803/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3804/// struct, used for member pointer types.
3805void
3806ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
3807  QualType PointeeTy = Ty->getPointeeType();
3808
3809  // Itanium C++ ABI 2.9.5p7:
3810  //   __flags is a flag word describing the cv-qualification and other
3811  //   attributes of the type pointed to.
3812  unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
3813
3814  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
3815  if (IsIncompleteClassType(ClassType))
3816    Flags |= PTI_ContainingClassIncomplete;
3817
3818  llvm::Type *UnsignedIntLTy =
3819    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3820  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3821
3822  // Itanium C++ ABI 2.9.5p7:
3823  //   __pointee is a pointer to the std::type_info derivation for the
3824  //   unqualified type being pointed to.
3825  llvm::Constant *PointeeTypeInfo =
3826      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
3827  Fields.push_back(PointeeTypeInfo);
3828
3829  // Itanium C++ ABI 2.9.5p9:
3830  //   __context is a pointer to an abi::__class_type_info corresponding to the
3831  //   class type containing the member pointed to
3832  //   (e.g., the "A" in "int A::*").
3833  Fields.push_back(
3834      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
3835}
3836
3837llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
3838  return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
3839}
3840
3841void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
3842  // Types added here must also be added to TypeInfoIsInStandardLibrary.
3843  QualType FundamentalTypes[] = {
3844      getContext().VoidTy,             getContext().NullPtrTy,
3845      getContext().BoolTy,             getContext().WCharTy,
3846      getContext().CharTy,             getContext().UnsignedCharTy,
3847      getContext().SignedCharTy,       getContext().ShortTy,
3848      getContext().UnsignedShortTy,    getContext().IntTy,
3849      getContext().UnsignedIntTy,      getContext().LongTy,
3850      getContext().UnsignedLongTy,     getContext().LongLongTy,
3851      getContext().UnsignedLongLongTy, getContext().Int128Ty,
3852      getContext().UnsignedInt128Ty,   getContext().HalfTy,
3853      getContext().FloatTy,            getContext().DoubleTy,
3854      getContext().LongDoubleTy,       getContext().Float128Ty,
3855      getContext().Char8Ty,            getContext().Char16Ty,
3856      getContext().Char32Ty
3857  };
3858  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3859      RD->hasAttr<DLLExportAttr>()
3860      ? llvm::GlobalValue::DLLExportStorageClass
3861      : llvm::GlobalValue::DefaultStorageClass;
3862  llvm::GlobalValue::VisibilityTypes Visibility =
3863      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
3864  for (const QualType &FundamentalType : FundamentalTypes) {
3865    QualType PointerType = getContext().getPointerType(FundamentalType);
3866    QualType PointerTypeConst = getContext().getPointerType(
3867        FundamentalType.withConst());
3868    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
3869      ItaniumRTTIBuilder(*this).BuildTypeInfo(
3870          Type, llvm::GlobalValue::ExternalLinkage,
3871          Visibility, DLLStorageClass);
3872  }
3873}
3874
3875/// What sort of uniqueness rules should we use for the RTTI for the
3876/// given type?
3877ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
3878    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
3879  if (shouldRTTIBeUnique())
3880    return RUK_Unique;
3881
3882  // It's only necessary for linkonce_odr or weak_odr linkage.
3883  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
3884      Linkage != llvm::GlobalValue::WeakODRLinkage)
3885    return RUK_Unique;
3886
3887  // It's only necessary with default visibility.
3888  if (CanTy->getVisibility() != DefaultVisibility)
3889    return RUK_Unique;
3890
3891  // If we're not required to publish this symbol, hide it.
3892  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
3893    return RUK_NonUniqueHidden;
3894
3895  // If we're required to publish this symbol, as we might be under an
3896  // explicit instantiation, leave it with default visibility but
3897  // enable string-comparisons.
3898  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
3899  return RUK_NonUniqueVisible;
3900}
3901
3902// Find out how to codegen the complete destructor and constructor
3903namespace {
3904enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
3905}
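// For example (illustrative): for a class Foo with no virtual bases, the
// complete-object constructor (C1, mangled _ZN3FooC1Ev for Foo::Foo()) has the
// same body as the base-object constructor (C2, _ZN3FooC2Ev), so it can be
// emitted as an alias, replaced via RAUW, or placed in a shared COMDAT.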
3906static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
3907                                       const CXXMethodDecl *MD) {
3908  if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
3909    return StructorCodegen::Emit;
3910
3911  // The complete and base structors are not equivalent if there are any virtual
3912  // bases, so emit separate functions.
3913  if (MD->getParent()->getNumVBases())
3914    return StructorCodegen::Emit;
3915
3916  GlobalDecl AliasDecl;
3917  if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
3918    AliasDecl = GlobalDecl(DD, Dtor_Complete);
3919  } else {
3920    const auto *CD = cast<CXXConstructorDecl>(MD);
3921    AliasDecl = GlobalDecl(CD, Ctor_Complete);
3922  }
3923  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3924
3925  if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
3926    return StructorCodegen::RAUW;
3927
3928  // FIXME: Should we allow available_externally aliases?
3929  if (!llvm::GlobalAlias::isValidLinkage(Linkage))
3930    return StructorCodegen::RAUW;
3931
3932  if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
3933    // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
3934    if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
3935        CGM.getTarget().getTriple().isOSBinFormatWasm())
3936      return StructorCodegen::COMDAT;
3937    return StructorCodegen::Emit;
3938  }
3939
3940  return StructorCodegen::Alias;
3941}
3942
3943static void emitConstructorDestructorAlias(CodeGenModule &CGM,
3944                                           GlobalDecl AliasDecl,
3945                                           GlobalDecl TargetDecl) {
3946  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
3947
3948  StringRef MangledName = CGM.getMangledName(AliasDecl);
3949  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
3950  if (Entry && !Entry->isDeclaration())
3951    return;
3952
3953  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
3954
3955  // Create the alias with no name.
3956  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
3957
3958  // Constructors and destructors are always unnamed_addr.
3959  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3960
3961  // Switch any previous uses to the alias.
3962  if (Entry) {
3963    assert(Entry->getType() == Aliasee->getType() &&
3964           "declaration exists with different type");
3965    Alias->takeName(Entry);
3966    Entry->replaceAllUsesWith(Alias);
3967    Entry->eraseFromParent();
3968  } else {
3969    Alias->setName(MangledName);
3970  }
3971
3972  // Finally, set up the alias with its proper name and attributes.
3973  CGM.SetCommonAttributes(AliasDecl, Alias);
3974}
3975
3976void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
3977  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
3978  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
3979  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
3980
3981  StructorCodegen CGType = getCodegenToUse(CGM, MD);
3982
3983  if (CD ? GD.getCtorType() == Ctor_Complete
3984         : GD.getDtorType() == Dtor_Complete) {
3985    GlobalDecl BaseDecl;
3986    if (CD)
3987      BaseDecl = GD.getWithCtorType(Ctor_Base);
3988    else
3989      BaseDecl = GD.getWithDtorType(Dtor_Base);
3990
3991    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
3992      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
3993      return;
3994    }
3995
3996    if (CGType == StructorCodegen::RAUW) {
3997      StringRef MangledName = CGM.getMangledName(GD);
3998      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
3999      CGM.addReplacement(MangledName, Aliasee);
4000      return;
4001    }
4002  }
4003
4004  // The base destructor is equivalent to the base destructor of its
4005  // base class if there is exactly one non-virtual base class with a
4006  // non-trivial destructor, there are no fields with a non-trivial
4007  // destructor, and the body of the destructor is trivial.
4008  if (DD && GD.getDtorType() == Dtor_Base &&
4009      CGType != StructorCodegen::COMDAT &&
4010      !CGM.TryEmitBaseDestructorAsAlias(DD))
4011    return;
4012
4013  // FIXME: The deleting destructor is equivalent to the selected operator
4014  // delete if:
4015  //  * either the delete is a destroying operator delete or the destructor
4016  //    would be trivial if it weren't virtual,
4017  //  * the conversion from the 'this' parameter to the first parameter of the
4018  //    destructor is equivalent to a bitcast,
4019  //  * the destructor does not have an implicit "this" return, and
4020  //  * the operator delete has the same calling convention and IR function type
4021  //    as the destructor.
4022  // In such cases we should try to emit the deleting dtor as an alias to the
4023  // selected 'operator delete'.
4024
4025  llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4026
4027  if (CGType == StructorCodegen::COMDAT) {
4028    SmallString<256> Buffer;
4029    llvm::raw_svector_ostream Out(Buffer);
4030    if (DD)
4031      getMangleContext().mangleCXXDtorComdat(DD, Out);
4032    else
4033      getMangleContext().mangleCXXCtorComdat(CD, Out);
4034    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4035    Fn->setComdat(C);
4036  } else {
4037    CGM.maybeSetTrivialComdat(*MD, *Fn);
4038  }
4039}
4040
4041static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4042  // void *__cxa_begin_catch(void*);
4043  llvm::FunctionType *FTy = llvm::FunctionType::get(
4044      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4045
4046  return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4047}
4048
4049static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4050  // void __cxa_end_catch();
4051  llvm::FunctionType *FTy =
4052      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4053
4054  return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4055}
4056
4057static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4058  // void *__cxa_get_exception_ptr(void*);
4059  llvm::FunctionType *FTy = llvm::FunctionType::get(
4060      CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4061
4062  return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4063}
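// Taken together, these give the runtime entry points a catch handler is
// lowered to.  Roughly (a sketch; the real IR also involves the landingpad
// and whatever cleanups are in scope):
//
//   %exn = ...exception pointer recovered from the landing pad...
//   %adj = call i8* @__cxa_begin_catch(i8* %exn)   ; adjusted object pointer
//   ...initialize the catch parameter from %adj, run the handler body...
//   call void @__cxa_end_catch()                   ; may destroy the exception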
4064
4065namespace {
4066  /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4067  /// exception type lets us state definitively that the thrown exception
4068  /// type does not have a destructor.  In particular:
4069  ///   - Catch-alls tell us nothing, so we have to conservatively
4070  ///     assume that the thrown exception might have a destructor.
4071  ///   - Catches by reference behave according to their base types.
4072  ///   - Catches of non-record types will only trigger for exceptions
4073  ///     of non-record types, which never have destructors.
4074  ///   - Catches of record types can trigger for arbitrary subclasses
4075  ///     of the caught type, so we have to assume the actual thrown
4076  ///     exception type might have a throwing destructor, even if the
4077  ///     caught type's destructor is trivial or nothrow.
4078  struct CallEndCatch final : EHScopeStack::Cleanup {
4079    CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4080    bool MightThrow;
4081
4082    void Emit(CodeGenFunction &CGF, Flags flags) override {
4083      if (!MightThrow) {
4084        CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4085        return;
4086      }
4087
4088      CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4089    }
4090  };
4091}
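// Concretely (a sketch): for 'catch (int)' the thrown object can never have a
// destructor, so the __cxa_end_catch call is emitted nounwind; for
// 'catch (...)' or 'catch (const Foo &)' where Foo is a record type, the
// destructor of the actual thrown object may throw, so the call may be emitted
// as an invoke when an enclosing EH scope requires it.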
4092
4093/// Emits a call to __cxa_begin_catch and enters a cleanup to call
4094/// __cxa_end_catch.
4095///
4096/// \param EndMightThrow - true if __cxa_end_catch might throw
4097static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4098                                   llvm::Value *Exn,
4099                                   bool EndMightThrow) {
4100  llvm::CallInst *call =
4101    CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4102
4103  CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4104
4105  return call;
4106}
4107
4108/// A "special initializer" callback for initializing a catch
4109/// parameter during catch initialization.
4110static void InitCatchParam(CodeGenFunction &CGF,
4111                           const VarDecl &CatchParam,
4112                           Address ParamAddr,
4113                           SourceLocation Loc) {
4114  // Load the exception from where the landing pad saved it.
4115  llvm::Value *Exn = CGF.getExceptionFromSlot();
4116
4117  CanQualType CatchType =
4118    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4119  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4120
4121  // If we're catching by reference, we can just cast the object
4122  // pointer to the appropriate pointer.
4123  if (isa<ReferenceType>(CatchType)) {
4124    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4125    bool EndCatchMightThrow = CaughtType->isRecordType();
4126
4127    // __cxa_begin_catch returns the adjusted object pointer.
4128    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4129
4130    // We have no way to tell the personality function that we're
4131    // catching by reference, so if we're catching a pointer,
4132    // __cxa_begin_catch will actually return that pointer by value.
4133    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4134      QualType PointeeType = PT->getPointeeType();
4135
4136      // When catching by reference, generally we should just ignore
4137      // this by-value pointer and use the exception object instead.
4138      if (!PointeeType->isRecordType()) {
4139
4140        // Exn points to the struct _Unwind_Exception header, which
4141        // we have to skip past in order to reach the exception data.
4142        unsigned HeaderSize =
4143          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4144        AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
4145
4146      // However, if we're catching a pointer-to-record type, that won't
4147      // work, because the personality function might have adjusted
4148      // the pointer.  There's actually no way for us to fully satisfy
4149      // the language/ABI contract here:  we can't use Exn because it
4150      // might have the wrong adjustment, but we can't use the by-value
4151      // pointer because it's off by a level of abstraction.
4152      //
4153      // The current solution is to dump the adjusted pointer into an
4154      // alloca, which breaks language semantics (because changing the
4155      // pointer doesn't change the exception) but at least works.
4156      // The better solution would be to filter out non-exact matches
4157      // and rethrow them, but this is tricky because the rethrow
4158      // really needs to be catchable by other sites at this landing
4159      // pad.  The best solution is to fix the personality function.
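      // For example (a sketch): with 'catch (Base *&p)' and a thrown
      // 'Derived *', the adjusted Base* returned by __cxa_begin_catch is
      // spilled to a temporary below and 'p' is bound to that temporary, so
      // assigning through 'p' does not update the exception object itself.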
4160      } else {
4161        // Pull the pointer for the reference type off.
4162        llvm::Type *PtrTy =
4163          cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
4164
4165        // Create the temporary and write the adjusted pointer into it.
4166        Address ExnPtrTmp =
4167          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4168        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4169        CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4170
4171        // Bind the reference to the temporary.
4172        AdjustedExn = ExnPtrTmp.getPointer();
4173      }
4174    }
4175
4176    llvm::Value *ExnCast =
4177      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4178    CGF.Builder.CreateStore(ExnCast, ParamAddr);
4179    return;
4180  }
4181
4182  // Scalars and complexes.
4183  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4184  if (TEK != TEK_Aggregate) {
4185    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4186
4187    // If the catch type is a pointer type, __cxa_begin_catch returns
4188    // the pointer by value.
4189    if (CatchType->hasPointerRepresentation()) {
4190      llvm::Value *CastExn =
4191        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4192
4193      switch (CatchType.getQualifiers().getObjCLifetime()) {
4194      case Qualifiers::OCL_Strong:
4195        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4196        LLVM_FALLTHROUGH;
4197
4198      case Qualifiers::OCL_None:
4199      case Qualifiers::OCL_ExplicitNone:
4200      case Qualifiers::OCL_Autoreleasing:
4201        CGF.Builder.CreateStore(CastExn, ParamAddr);
4202        return;
4203
4204      case Qualifiers::OCL_Weak:
4205        CGF.EmitARCInitWeak(ParamAddr, CastExn);
4206        return;
4207      }
4208      llvm_unreachable("bad ownership qualifier!");
4209    }
4210
4211    // Otherwise, it returns a pointer into the exception object.
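    // For example (a sketch): for 'catch (int i)', the returned pointer points
    // at the int stored in the exception object; we load through it below and
    // store the value into the alloca backing 'i'.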
4212
4213    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4214    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4215
4216    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4217    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4218    switch (TEK) {
4219    case TEK_Complex:
4220      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4221                             /*init*/ true);
4222      return;
4223    case TEK_Scalar: {
4224      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4225      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4226      return;
4227    }
4228    case TEK_Aggregate:
4229      llvm_unreachable("evaluation kind filtered out!");
4230    }
4231    llvm_unreachable("bad evaluation kind");
4232  }
4233
4234  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4235  auto catchRD = CatchType->getAsCXXRecordDecl();
4236  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4237
4238  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4239
4240  // Check for a copy expression.  If we don't have a copy expression,
4241  // that means a trivial copy is okay.
4242  const Expr *copyExpr = CatchParam.getInit();
4243  if (!copyExpr) {
4244    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4245    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4246                        caughtExnAlignment);
4247    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4248    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4249    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4250    return;
4251  }
4252
4253  // We have to call __cxa_get_exception_ptr to get the adjusted
4254  // pointer before copying.
4255  llvm::CallInst *rawAdjustedExn =
4256    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4257
4258  // Cast that to the appropriate type.
4259  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4260                      caughtExnAlignment);
4261
4262  // The copy expression is defined in terms of an OpaqueValueExpr.
4263  // Find it and map it to the adjusted expression.
4264  CodeGenFunction::OpaqueValueMapping
4265    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4266           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4267
4268  // Call the copy ctor in a terminate scope.
4269  CGF.EHStack.pushTerminate();
4270
4271  // Perform the copy construction.
4272  CGF.EmitAggExpr(copyExpr,
4273                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4274                                        AggValueSlot::IsNotDestructed,
4275                                        AggValueSlot::DoesNotNeedGCBarriers,
4276                                        AggValueSlot::IsNotAliased,
4277                                        AggValueSlot::DoesNotOverlap));
4278
4279  // Leave the terminate scope.
4280  CGF.EHStack.popTerminate();
4281
4282  // Undo the opaque value mapping.
4283  opaque.pop();
4284
4285  // Finally we can call __cxa_begin_catch.
4286  CallBeginCatch(CGF, Exn, true);
4287}
4288
4289/// Begins a catch statement by initializing the catch variable and
4290/// calling __cxa_begin_catch.
4291void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4292                                   const CXXCatchStmt *S) {
4293  // We have to be very careful with the ordering of cleanups here:
4294  //   C++ [except.throw]p4:
4295  //     The destruction [of the exception temporary] occurs
4296  //     immediately after the destruction of the object declared in
4297  //     the exception-declaration in the handler.
4298  //
4299  // So the precise ordering is:
4300  //   1.  Construct catch variable.
4301  //   2.  __cxa_begin_catch
4302  //   3.  Enter __cxa_end_catch cleanup
4303  //   4.  Enter dtor cleanup
4304  //
4305  // We do this by using a slightly abnormal initialization process.
4306  // Delegation sequence:
4307  //   - ExitCXXTryStmt opens a RunCleanupsScope
4308  //     - EmitAutoVarAlloca creates the variable and debug info
4309  //       - InitCatchParam initializes the variable from the exception
4310  //       - CallBeginCatch calls __cxa_begin_catch
4311  //       - CallBeginCatch enters the __cxa_end_catch cleanup
4312  //     - EmitAutoVarCleanups enters the variable destructor cleanup
4313  //   - EmitCXXTryStmt emits the code for the catch body
4314  //   - EmitCXXTryStmt closes the RunCleanupsScope
4315
4316  VarDecl *CatchParam = S->getExceptionDecl();
4317  if (!CatchParam) {
4318    llvm::Value *Exn = CGF.getExceptionFromSlot();
4319    CallBeginCatch(CGF, Exn, true);
4320    return;
4321  }
4322
4323  // Emit the local.
4324  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4325  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4326  CGF.EmitAutoVarCleanups(var);
4327}
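// For example (a sketch): for 'catch (Foo f)' where Foo has a non-trivial copy
// constructor and destructor, the emitted order is: copy-construct 'f' from
// the adjusted exception object, call __cxa_begin_catch, push the
// __cxa_end_catch cleanup, then push the cleanup that destroys 'f'.  On exit
// the cleanups pop in reverse, so ~Foo() runs before __cxa_end_catch destroys
// the exception temporary, as [except.throw]p4 requires.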
4328
4329/// Get or define the following function:
4330///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4331/// This code is used only in C++.
4332static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4333  llvm::FunctionType *fnTy =
4334    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4335  llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4336      fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4337  llvm::Function *fn =
4338      cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4339  if (fn->empty()) {
4340    fn->setDoesNotThrow();
4341    fn->setDoesNotReturn();
4342
4343    // What we really want is to massively penalize inlining without
4344    // forbidding it completely.  The difference between that and
4345    // 'noinline' is negligible.
4346    fn->addFnAttr(llvm::Attribute::NoInline);
4347
4348    // Allow this function to be shared across translation units, but
4349    // we don't want it to turn into an exported symbol.
4350    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4351    fn->setVisibility(llvm::Function::HiddenVisibility);
4352    if (CGM.supportsCOMDAT())
4353      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4354
4355    // Set up the function.
4356    llvm::BasicBlock *entry =
4357        llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4358    CGBuilderTy builder(CGM, entry);
4359
4360    // Pull the exception pointer out of the parameter list.
4361    llvm::Value *exn = &*fn->arg_begin();
4362
4363    // Call __cxa_begin_catch(exn).
4364    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4365    catchCall->setDoesNotThrow();
4366    catchCall->setCallingConv(CGM.getRuntimeCC());
4367
4368    // Call std::terminate().
4369    llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4370    termCall->setDoesNotThrow();
4371    termCall->setDoesNotReturn();
4372    termCall->setCallingConv(CGM.getRuntimeCC());
4373
4374    // std::terminate cannot return.
4375    builder.CreateUnreachable();
4376  }
4377  return fnRef;
4378}
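// Conceptually the helper defined above behaves like the following (a sketch;
// the real definition is built directly in IR):
//
//   extern "C" void __clang_call_terminate(void *exn) noexcept {
//     (void) __cxa_begin_catch(exn); // mark the exception as handled
//     std::terminate();              // never returns
//   }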
4379
4380llvm::CallInst *
4381ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4382                                                   llvm::Value *Exn) {
4383  // In C++, we want to call __cxa_begin_catch() before terminating.
4384  if (Exn) {
4385    assert(CGF.CGM.getLangOpts().CPlusPlus);
4386    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4387  }
4388  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4389}
4390
4391std::pair<llvm::Value *, const CXXRecordDecl *>
4392ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4393                             const CXXRecordDecl *RD) {
4394  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4395}
4396
4397void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4398                                       const CXXCatchStmt *C) {
4399  if (CGF.getTarget().hasFeature("exception-handling"))
4400    CGF.EHStack.pushCleanup<CatchRetScope>(
4401        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4402  ItaniumCXXABI::emitBeginCatch(CGF, C);
4403}
4404