1//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This is the code that handles AST -> LLVM type lowering. 10// 11//===----------------------------------------------------------------------===// 12 13#include "CodeGenTypes.h" 14#include "CGCXXABI.h" 15#include "CGCall.h" 16#include "CGOpenCLRuntime.h" 17#include "CGRecordLayout.h" 18#include "TargetInfo.h" 19#include "clang/AST/ASTContext.h" 20#include "clang/AST/DeclCXX.h" 21#include "clang/AST/DeclObjC.h" 22#include "clang/AST/Expr.h" 23#include "clang/AST/RecordLayout.h" 24#include "clang/CodeGen/CGFunctionInfo.h" 25#include "llvm/IR/DataLayout.h" 26#include "llvm/IR/DerivedTypes.h" 27#include "llvm/IR/Module.h" 28using namespace clang; 29using namespace CodeGen; 30 31CodeGenTypes::CodeGenTypes(CodeGenModule &cgm) 32 : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()), 33 Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), 34 TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) { 35 SkippedLayout = false; 36} 37 38CodeGenTypes::~CodeGenTypes() { 39 for (llvm::FoldingSet<CGFunctionInfo>::iterator 40 I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; ) 41 delete &*I++; 42} 43 44const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const { 45 return CGM.getCodeGenOpts(); 46} 47 48void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, 49 llvm::StructType *Ty, 50 StringRef suffix) { 51 SmallString<256> TypeName; 52 llvm::raw_svector_ostream OS(TypeName); 53 OS << RD->getKindName() << '.'; 54 55 // Name the codegen type after the typedef name 56 // if there is no tag type name available 57 if (RD->getIdentifier()) { 58 // FIXME: We should not have to check for a null decl context here. 59 // Right now we do it because the implicit Obj-C decls don't have one. 60 if (RD->getDeclContext()) 61 RD->printQualifiedName(OS); 62 else 63 RD->printName(OS); 64 } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) { 65 // FIXME: We should not have to check for a null decl context here. 66 // Right now we do it because the implicit Obj-C decls don't have one. 67 if (TDD->getDeclContext()) 68 TDD->printQualifiedName(OS); 69 else 70 TDD->printName(OS); 71 } else 72 OS << "anon"; 73 74 if (!suffix.empty()) 75 OS << suffix; 76 77 Ty->setName(OS.str()); 78} 79 80/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from 81/// ConvertType in that it is used to convert to the memory representation for 82/// a type. For example, the scalar representation for _Bool is i1, but the 83/// memory representation is usually i8 or i32, depending on the target. 84llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) { 85 if (T->isConstantMatrixType()) { 86 const Type *Ty = Context.getCanonicalType(T).getTypePtr(); 87 const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty); 88 return llvm::ArrayType::get(ConvertType(MT->getElementType()), 89 MT->getNumRows() * MT->getNumColumns()); 90 } 91 92 llvm::Type *R = ConvertType(T); 93 94 // If this is a bool type, or an ExtIntType in a bitfield representation, 95 // map this integer to the target-specified size. 96 if ((ForBitField && T->isExtIntType()) || R->isIntegerTy(1)) 97 return llvm::IntegerType::get(getLLVMContext(), 98 (unsigned)Context.getTypeSize(T)); 99 100 // Else, don't map it. 101 return R; 102} 103 104/// isRecordLayoutComplete - Return true if the specified type is already 105/// completely laid out. 106bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const { 107 llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I = 108 RecordDeclTypes.find(Ty); 109 return I != RecordDeclTypes.end() && !I->second->isOpaque(); 110} 111 112static bool 113isSafeToConvert(QualType T, CodeGenTypes &CGT, 114 llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked); 115 116 117/// isSafeToConvert - Return true if it is safe to convert the specified record 118/// decl to IR and lay it out, false if doing so would cause us to get into a 119/// recursive compilation mess. 120static bool 121isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT, 122 llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { 123 // If we have already checked this type (maybe the same type is used by-value 124 // multiple times in multiple structure fields, don't check again. 125 if (!AlreadyChecked.insert(RD).second) 126 return true; 127 128 const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr(); 129 130 // If this type is already laid out, converting it is a noop. 131 if (CGT.isRecordLayoutComplete(Key)) return true; 132 133 // If this type is currently being laid out, we can't recursively compile it. 134 if (CGT.isRecordBeingLaidOut(Key)) 135 return false; 136 137 // If this type would require laying out bases that are currently being laid 138 // out, don't do it. This includes virtual base classes which get laid out 139 // when a class is translated, even though they aren't embedded by-value into 140 // the class. 141 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 142 for (const auto &I : CRD->bases()) 143 if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT, 144 AlreadyChecked)) 145 return false; 146 } 147 148 // If this type would require laying out members that are currently being laid 149 // out, don't do it. 150 for (const auto *I : RD->fields()) 151 if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked)) 152 return false; 153 154 // If there are no problems, lets do it. 155 return true; 156} 157 158/// isSafeToConvert - Return true if it is safe to convert this field type, 159/// which requires the structure elements contained by-value to all be 160/// recursively safe to convert. 161static bool 162isSafeToConvert(QualType T, CodeGenTypes &CGT, 163 llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { 164 // Strip off atomic type sugar. 165 if (const auto *AT = T->getAs<AtomicType>()) 166 T = AT->getValueType(); 167 168 // If this is a record, check it. 169 if (const auto *RT = T->getAs<RecordType>()) 170 return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked); 171 172 // If this is an array, check the elements, which are embedded inline. 173 if (const auto *AT = CGT.getContext().getAsArrayType(T)) 174 return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked); 175 176 // Otherwise, there is no concern about transforming this. We only care about 177 // things that are contained by-value in a structure that can have another 178 // structure as a member. 179 return true; 180} 181 182 183/// isSafeToConvert - Return true if it is safe to convert the specified record 184/// decl to IR and lay it out, false if doing so would cause us to get into a 185/// recursive compilation mess. 186static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) { 187 // If no structs are being laid out, we can certainly do this one. 188 if (CGT.noRecordsBeingLaidOut()) return true; 189 190 llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked; 191 return isSafeToConvert(RD, CGT, AlreadyChecked); 192} 193 194/// isFuncParamTypeConvertible - Return true if the specified type in a 195/// function parameter or result position can be converted to an IR type at this 196/// point. This boils down to being whether it is complete, as well as whether 197/// we've temporarily deferred expanding the type because we're in a recursive 198/// context. 199bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) { 200 // Some ABIs cannot have their member pointers represented in IR unless 201 // certain circumstances have been reached. 202 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 203 return getCXXABI().isMemberPointerConvertible(MPT); 204 205 // If this isn't a tagged type, we can convert it! 206 const TagType *TT = Ty->getAs<TagType>(); 207 if (!TT) return true; 208 209 // Incomplete types cannot be converted. 210 if (TT->isIncompleteType()) 211 return false; 212 213 // If this is an enum, then it is always safe to convert. 214 const RecordType *RT = dyn_cast<RecordType>(TT); 215 if (!RT) return true; 216 217 // Otherwise, we have to be careful. If it is a struct that we're in the 218 // process of expanding, then we can't convert the function type. That's ok 219 // though because we must be in a pointer context under the struct, so we can 220 // just convert it to a dummy type. 221 // 222 // We decide this by checking whether ConvertRecordDeclType returns us an 223 // opaque type for a struct that we know is defined. 224 return isSafeToConvert(RT->getDecl(), *this); 225} 226 227 228/// Code to verify a given function type is complete, i.e. the return type 229/// and all of the parameter types are complete. Also check to see if we are in 230/// a RS_StructPointer context, and if so whether any struct types have been 231/// pended. If so, we don't want to ask the ABI lowering code to handle a type 232/// that cannot be converted to an IR type. 233bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) { 234 if (!isFuncParamTypeConvertible(FT->getReturnType())) 235 return false; 236 237 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) 238 for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++) 239 if (!isFuncParamTypeConvertible(FPT->getParamType(i))) 240 return false; 241 242 return true; 243} 244 245/// UpdateCompletedType - When we find the full definition for a TagDecl, 246/// replace the 'opaque' type we previously made for it if applicable. 247void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { 248 // If this is an enum being completed, then we flush all non-struct types from 249 // the cache. This allows function types and other things that may be derived 250 // from the enum to be recomputed. 251 if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) { 252 // Only flush the cache if we've actually already converted this type. 253 if (TypeCache.count(ED->getTypeForDecl())) { 254 // Okay, we formed some types based on this. We speculated that the enum 255 // would be lowered to i32, so we only need to flush the cache if this 256 // didn't happen. 257 if (!ConvertType(ED->getIntegerType())->isIntegerTy(32)) 258 TypeCache.clear(); 259 } 260 // If necessary, provide the full definition of a type only used with a 261 // declaration so far. 262 if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) 263 DI->completeType(ED); 264 return; 265 } 266 267 // If we completed a RecordDecl that we previously used and converted to an 268 // anonymous type, then go ahead and complete it now. 269 const RecordDecl *RD = cast<RecordDecl>(TD); 270 if (RD->isDependentType()) return; 271 272 // Only complete it if we converted it already. If we haven't converted it 273 // yet, we'll just do it lazily. 274 if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr())) 275 ConvertRecordDeclType(RD); 276 277 // If necessary, provide the full definition of a type only used with a 278 // declaration so far. 279 if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) 280 DI->completeType(RD); 281} 282 283void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) { 284 QualType T = Context.getRecordType(RD); 285 T = Context.getCanonicalType(T); 286 287 const Type *Ty = T.getTypePtr(); 288 if (RecordsWithOpaqueMemberPointers.count(Ty)) { 289 TypeCache.clear(); 290 RecordsWithOpaqueMemberPointers.clear(); 291 } 292} 293 294static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext, 295 const llvm::fltSemantics &format, 296 bool UseNativeHalf = false) { 297 if (&format == &llvm::APFloat::IEEEhalf()) { 298 if (UseNativeHalf) 299 return llvm::Type::getHalfTy(VMContext); 300 else 301 return llvm::Type::getInt16Ty(VMContext); 302 } 303 if (&format == &llvm::APFloat::BFloat()) 304 return llvm::Type::getBFloatTy(VMContext); 305 if (&format == &llvm::APFloat::IEEEsingle()) 306 return llvm::Type::getFloatTy(VMContext); 307 if (&format == &llvm::APFloat::IEEEdouble()) 308 return llvm::Type::getDoubleTy(VMContext); 309 if (&format == &llvm::APFloat::IEEEquad()) 310 return llvm::Type::getFP128Ty(VMContext); 311 if (&format == &llvm::APFloat::PPCDoubleDouble()) 312 return llvm::Type::getPPC_FP128Ty(VMContext); 313 if (&format == &llvm::APFloat::x87DoubleExtended()) 314 return llvm::Type::getX86_FP80Ty(VMContext); 315 llvm_unreachable("Unknown float format!"); 316} 317 318llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) { 319 assert(QFT.isCanonical()); 320 const Type *Ty = QFT.getTypePtr(); 321 const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr()); 322 // First, check whether we can build the full function type. If the 323 // function type depends on an incomplete type (e.g. a struct or enum), we 324 // cannot lower the function type. 325 if (!isFuncTypeConvertible(FT)) { 326 // This function's type depends on an incomplete tag type. 327 328 // Force conversion of all the relevant record types, to make sure 329 // we re-convert the FunctionType when appropriate. 330 if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>()) 331 ConvertRecordDeclType(RT->getDecl()); 332 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) 333 for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++) 334 if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>()) 335 ConvertRecordDeclType(RT->getDecl()); 336 337 SkippedLayout = true; 338 339 // Return a placeholder type. 340 return llvm::StructType::get(getLLVMContext()); 341 } 342 343 // While we're converting the parameter types for a function, we don't want 344 // to recursively convert any pointed-to structs. Converting directly-used 345 // structs is ok though. 346 if (!RecordsBeingLaidOut.insert(Ty).second) { 347 SkippedLayout = true; 348 return llvm::StructType::get(getLLVMContext()); 349 } 350 351 // The function type can be built; call the appropriate routines to 352 // build it. 353 const CGFunctionInfo *FI; 354 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) { 355 FI = &arrangeFreeFunctionType( 356 CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0))); 357 } else { 358 const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT); 359 FI = &arrangeFreeFunctionType( 360 CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0))); 361 } 362 363 llvm::Type *ResultType = nullptr; 364 // If there is something higher level prodding our CGFunctionInfo, then 365 // don't recurse into it again. 366 if (FunctionsBeingProcessed.count(FI)) { 367 368 ResultType = llvm::StructType::get(getLLVMContext()); 369 SkippedLayout = true; 370 } else { 371 372 // Otherwise, we're good to go, go ahead and convert it. 373 ResultType = GetFunctionType(*FI); 374 } 375 376 RecordsBeingLaidOut.erase(Ty); 377 378 if (SkippedLayout) 379 TypeCache.clear(); 380 381 if (RecordsBeingLaidOut.empty()) 382 while (!DeferredRecords.empty()) 383 ConvertRecordDeclType(DeferredRecords.pop_back_val()); 384 return ResultType; 385} 386 387/// ConvertType - Convert the specified type to its LLVM form. 388llvm::Type *CodeGenTypes::ConvertType(QualType T) { 389 T = Context.getCanonicalType(T); 390 391 const Type *Ty = T.getTypePtr(); 392 393 // For the device-side compilation, CUDA device builtin surface/texture types 394 // may be represented in different types. 395 if (Context.getLangOpts().CUDAIsDevice) { 396 if (T->isCUDADeviceBuiltinSurfaceType()) { 397 if (auto *Ty = CGM.getTargetCodeGenInfo() 398 .getCUDADeviceBuiltinSurfaceDeviceType()) 399 return Ty; 400 } else if (T->isCUDADeviceBuiltinTextureType()) { 401 if (auto *Ty = CGM.getTargetCodeGenInfo() 402 .getCUDADeviceBuiltinTextureDeviceType()) 403 return Ty; 404 } 405 } 406 407 // RecordTypes are cached and processed specially. 408 if (const RecordType *RT = dyn_cast<RecordType>(Ty)) 409 return ConvertRecordDeclType(RT->getDecl()); 410 411 // See if type is already cached. 412 llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty); 413 // If type is found in map then use it. Otherwise, convert type T. 414 if (TCI != TypeCache.end()) 415 return TCI->second; 416 417 // If we don't have it in the cache, convert it now. 418 llvm::Type *ResultType = nullptr; 419 switch (Ty->getTypeClass()) { 420 case Type::Record: // Handled above. 421#define TYPE(Class, Base) 422#define ABSTRACT_TYPE(Class, Base) 423#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 424#define DEPENDENT_TYPE(Class, Base) case Type::Class: 425#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 426#include "clang/AST/TypeNodes.inc" 427 llvm_unreachable("Non-canonical or dependent types aren't possible."); 428 429 case Type::Builtin: { 430 switch (cast<BuiltinType>(Ty)->getKind()) { 431 case BuiltinType::Void: 432 case BuiltinType::ObjCId: 433 case BuiltinType::ObjCClass: 434 case BuiltinType::ObjCSel: 435 // LLVM void type can only be used as the result of a function call. Just 436 // map to the same as char. 437 ResultType = llvm::Type::getInt8Ty(getLLVMContext()); 438 break; 439 440 case BuiltinType::Bool: 441 // Note that we always return bool as i1 for use as a scalar type. 442 ResultType = llvm::Type::getInt1Ty(getLLVMContext()); 443 break; 444 445 case BuiltinType::Char_S: 446 case BuiltinType::Char_U: 447 case BuiltinType::SChar: 448 case BuiltinType::UChar: 449 case BuiltinType::Short: 450 case BuiltinType::UShort: 451 case BuiltinType::Int: 452 case BuiltinType::UInt: 453 case BuiltinType::Long: 454 case BuiltinType::ULong: 455 case BuiltinType::LongLong: 456 case BuiltinType::ULongLong: 457 case BuiltinType::WChar_S: 458 case BuiltinType::WChar_U: 459 case BuiltinType::Char8: 460 case BuiltinType::Char16: 461 case BuiltinType::Char32: 462 case BuiltinType::ShortAccum: 463 case BuiltinType::Accum: 464 case BuiltinType::LongAccum: 465 case BuiltinType::UShortAccum: 466 case BuiltinType::UAccum: 467 case BuiltinType::ULongAccum: 468 case BuiltinType::ShortFract: 469 case BuiltinType::Fract: 470 case BuiltinType::LongFract: 471 case BuiltinType::UShortFract: 472 case BuiltinType::UFract: 473 case BuiltinType::ULongFract: 474 case BuiltinType::SatShortAccum: 475 case BuiltinType::SatAccum: 476 case BuiltinType::SatLongAccum: 477 case BuiltinType::SatUShortAccum: 478 case BuiltinType::SatUAccum: 479 case BuiltinType::SatULongAccum: 480 case BuiltinType::SatShortFract: 481 case BuiltinType::SatFract: 482 case BuiltinType::SatLongFract: 483 case BuiltinType::SatUShortFract: 484 case BuiltinType::SatUFract: 485 case BuiltinType::SatULongFract: 486 ResultType = llvm::IntegerType::get(getLLVMContext(), 487 static_cast<unsigned>(Context.getTypeSize(T))); 488 break; 489 490 case BuiltinType::Float16: 491 ResultType = 492 getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T), 493 /* UseNativeHalf = */ true); 494 break; 495 496 case BuiltinType::Half: 497 // Half FP can either be storage-only (lowered to i16) or native. 498 ResultType = getTypeForFormat( 499 getLLVMContext(), Context.getFloatTypeSemantics(T), 500 Context.getLangOpts().NativeHalfType || 501 !Context.getTargetInfo().useFP16ConversionIntrinsics()); 502 break; 503 case BuiltinType::BFloat16: 504 case BuiltinType::Float: 505 case BuiltinType::Double: 506 case BuiltinType::LongDouble: 507 case BuiltinType::Float128: 508 ResultType = getTypeForFormat(getLLVMContext(), 509 Context.getFloatTypeSemantics(T), 510 /* UseNativeHalf = */ false); 511 break; 512 513 case BuiltinType::NullPtr: 514 // Model std::nullptr_t as i8* 515 ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); 516 break; 517 518 case BuiltinType::UInt128: 519 case BuiltinType::Int128: 520 ResultType = llvm::IntegerType::get(getLLVMContext(), 128); 521 break; 522 523#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 524 case BuiltinType::Id: 525#include "clang/Basic/OpenCLImageTypes.def" 526#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 527 case BuiltinType::Id: 528#include "clang/Basic/OpenCLExtensionTypes.def" 529 case BuiltinType::OCLSampler: 530 case BuiltinType::OCLEvent: 531 case BuiltinType::OCLClkEvent: 532 case BuiltinType::OCLQueue: 533 case BuiltinType::OCLReserveID: 534 ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty); 535 break; 536 case BuiltinType::SveInt8: 537 case BuiltinType::SveUint8: 538 case BuiltinType::SveInt8x2: 539 case BuiltinType::SveUint8x2: 540 case BuiltinType::SveInt8x3: 541 case BuiltinType::SveUint8x3: 542 case BuiltinType::SveInt8x4: 543 case BuiltinType::SveUint8x4: 544 case BuiltinType::SveInt16: 545 case BuiltinType::SveUint16: 546 case BuiltinType::SveInt16x2: 547 case BuiltinType::SveUint16x2: 548 case BuiltinType::SveInt16x3: 549 case BuiltinType::SveUint16x3: 550 case BuiltinType::SveInt16x4: 551 case BuiltinType::SveUint16x4: 552 case BuiltinType::SveInt32: 553 case BuiltinType::SveUint32: 554 case BuiltinType::SveInt32x2: 555 case BuiltinType::SveUint32x2: 556 case BuiltinType::SveInt32x3: 557 case BuiltinType::SveUint32x3: 558 case BuiltinType::SveInt32x4: 559 case BuiltinType::SveUint32x4: 560 case BuiltinType::SveInt64: 561 case BuiltinType::SveUint64: 562 case BuiltinType::SveInt64x2: 563 case BuiltinType::SveUint64x2: 564 case BuiltinType::SveInt64x3: 565 case BuiltinType::SveUint64x3: 566 case BuiltinType::SveInt64x4: 567 case BuiltinType::SveUint64x4: 568 case BuiltinType::SveBool: 569 case BuiltinType::SveFloat16: 570 case BuiltinType::SveFloat16x2: 571 case BuiltinType::SveFloat16x3: 572 case BuiltinType::SveFloat16x4: 573 case BuiltinType::SveFloat32: 574 case BuiltinType::SveFloat32x2: 575 case BuiltinType::SveFloat32x3: 576 case BuiltinType::SveFloat32x4: 577 case BuiltinType::SveFloat64: 578 case BuiltinType::SveFloat64x2: 579 case BuiltinType::SveFloat64x3: 580 case BuiltinType::SveFloat64x4: 581 case BuiltinType::SveBFloat16: 582 case BuiltinType::SveBFloat16x2: 583 case BuiltinType::SveBFloat16x3: 584 case BuiltinType::SveBFloat16x4: { 585 ASTContext::BuiltinVectorTypeInfo Info = 586 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty)); 587 return llvm::ScalableVectorType::get(ConvertType(Info.ElementType), 588 Info.EC.Min * Info.NumVectors); 589 } 590 case BuiltinType::Dependent: 591#define BUILTIN_TYPE(Id, SingletonId) 592#define PLACEHOLDER_TYPE(Id, SingletonId) \ 593 case BuiltinType::Id: 594#include "clang/AST/BuiltinTypes.def" 595 llvm_unreachable("Unexpected placeholder builtin type!"); 596 } 597 break; 598 } 599 case Type::Auto: 600 case Type::DeducedTemplateSpecialization: 601 llvm_unreachable("Unexpected undeduced type!"); 602 case Type::Complex: { 603 llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType()); 604 ResultType = llvm::StructType::get(EltTy, EltTy); 605 break; 606 } 607 case Type::LValueReference: 608 case Type::RValueReference: { 609 const ReferenceType *RTy = cast<ReferenceType>(Ty); 610 QualType ETy = RTy->getPointeeType(); 611 llvm::Type *PointeeType = ConvertTypeForMem(ETy); 612 unsigned AS = Context.getTargetAddressSpace(ETy); 613 ResultType = llvm::PointerType::get(PointeeType, AS); 614 break; 615 } 616 case Type::Pointer: { 617 const PointerType *PTy = cast<PointerType>(Ty); 618 QualType ETy = PTy->getPointeeType(); 619 llvm::Type *PointeeType = ConvertTypeForMem(ETy); 620 if (PointeeType->isVoidTy()) 621 PointeeType = llvm::Type::getInt8Ty(getLLVMContext()); 622 623 unsigned AS = PointeeType->isFunctionTy() 624 ? getDataLayout().getProgramAddressSpace() 625 : Context.getTargetAddressSpace(ETy); 626 627 ResultType = llvm::PointerType::get(PointeeType, AS); 628 break; 629 } 630 631 case Type::VariableArray: { 632 const VariableArrayType *A = cast<VariableArrayType>(Ty); 633 assert(A->getIndexTypeCVRQualifiers() == 0 && 634 "FIXME: We only handle trivial array types so far!"); 635 // VLAs resolve to the innermost element type; this matches 636 // the return of alloca, and there isn't any obviously better choice. 637 ResultType = ConvertTypeForMem(A->getElementType()); 638 break; 639 } 640 case Type::IncompleteArray: { 641 const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty); 642 assert(A->getIndexTypeCVRQualifiers() == 0 && 643 "FIXME: We only handle trivial array types so far!"); 644 // int X[] -> [0 x int], unless the element type is not sized. If it is 645 // unsized (e.g. an incomplete struct) just use [0 x i8]. 646 ResultType = ConvertTypeForMem(A->getElementType()); 647 if (!ResultType->isSized()) { 648 SkippedLayout = true; 649 ResultType = llvm::Type::getInt8Ty(getLLVMContext()); 650 } 651 ResultType = llvm::ArrayType::get(ResultType, 0); 652 break; 653 } 654 case Type::ConstantArray: { 655 const ConstantArrayType *A = cast<ConstantArrayType>(Ty); 656 llvm::Type *EltTy = ConvertTypeForMem(A->getElementType()); 657 658 // Lower arrays of undefined struct type to arrays of i8 just to have a 659 // concrete type. 660 if (!EltTy->isSized()) { 661 SkippedLayout = true; 662 EltTy = llvm::Type::getInt8Ty(getLLVMContext()); 663 } 664 665 ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue()); 666 break; 667 } 668 case Type::ExtVector: 669 case Type::Vector: { 670 const VectorType *VT = cast<VectorType>(Ty); 671 ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()), 672 VT->getNumElements()); 673 break; 674 } 675 case Type::ConstantMatrix: { 676 const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty); 677 ResultType = 678 llvm::FixedVectorType::get(ConvertType(MT->getElementType()), 679 MT->getNumRows() * MT->getNumColumns()); 680 break; 681 } 682 case Type::FunctionNoProto: 683 case Type::FunctionProto: 684 ResultType = ConvertFunctionTypeInternal(T); 685 break; 686 case Type::ObjCObject: 687 ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType()); 688 break; 689 690 case Type::ObjCInterface: { 691 // Objective-C interfaces are always opaque (outside of the 692 // runtime, which can do whatever it likes); we never refine 693 // these. 694 llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)]; 695 if (!T) 696 T = llvm::StructType::create(getLLVMContext()); 697 ResultType = T; 698 break; 699 } 700 701 case Type::ObjCObjectPointer: { 702 // Protocol qualifications do not influence the LLVM type, we just return a 703 // pointer to the underlying interface type. We don't need to worry about 704 // recursive conversion. 705 llvm::Type *T = 706 ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); 707 ResultType = T->getPointerTo(); 708 break; 709 } 710 711 case Type::Enum: { 712 const EnumDecl *ED = cast<EnumType>(Ty)->getDecl(); 713 if (ED->isCompleteDefinition() || ED->isFixed()) 714 return ConvertType(ED->getIntegerType()); 715 // Return a placeholder 'i32' type. This can be changed later when the 716 // type is defined (see UpdateCompletedType), but is likely to be the 717 // "right" answer. 718 ResultType = llvm::Type::getInt32Ty(getLLVMContext()); 719 break; 720 } 721 722 case Type::BlockPointer: { 723 const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType(); 724 llvm::Type *PointeeType = CGM.getLangOpts().OpenCL 725 ? CGM.getGenericBlockLiteralType() 726 : ConvertTypeForMem(FTy); 727 unsigned AS = Context.getTargetAddressSpace(FTy); 728 ResultType = llvm::PointerType::get(PointeeType, AS); 729 break; 730 } 731 732 case Type::MemberPointer: { 733 auto *MPTy = cast<MemberPointerType>(Ty); 734 if (!getCXXABI().isMemberPointerConvertible(MPTy)) { 735 RecordsWithOpaqueMemberPointers.insert(MPTy->getClass()); 736 ResultType = llvm::StructType::create(getLLVMContext()); 737 } else { 738 ResultType = getCXXABI().ConvertMemberPointerType(MPTy); 739 } 740 break; 741 } 742 743 case Type::Atomic: { 744 QualType valueType = cast<AtomicType>(Ty)->getValueType(); 745 ResultType = ConvertTypeForMem(valueType); 746 747 // Pad out to the inflated size if necessary. 748 uint64_t valueSize = Context.getTypeSize(valueType); 749 uint64_t atomicSize = Context.getTypeSize(Ty); 750 if (valueSize != atomicSize) { 751 assert(valueSize < atomicSize); 752 llvm::Type *elts[] = { 753 ResultType, 754 llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8) 755 }; 756 ResultType = llvm::StructType::get(getLLVMContext(), 757 llvm::makeArrayRef(elts)); 758 } 759 break; 760 } 761 case Type::Pipe: { 762 ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty)); 763 break; 764 } 765 case Type::ExtInt: { 766 const auto &EIT = cast<ExtIntType>(Ty); 767 ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits()); 768 break; 769 } 770 } 771 772 assert(ResultType && "Didn't convert a type?"); 773 774 TypeCache[Ty] = ResultType; 775 return ResultType; 776} 777 778bool CodeGenModule::isPaddedAtomicType(QualType type) { 779 return isPaddedAtomicType(type->castAs<AtomicType>()); 780} 781 782bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) { 783 return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType()); 784} 785 786/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union. 787llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) { 788 // TagDecl's are not necessarily unique, instead use the (clang) 789 // type connected to the decl. 790 const Type *Key = Context.getTagDeclType(RD).getTypePtr(); 791 792 llvm::StructType *&Entry = RecordDeclTypes[Key]; 793 794 // If we don't have a StructType at all yet, create the forward declaration. 795 if (!Entry) { 796 Entry = llvm::StructType::create(getLLVMContext()); 797 addRecordTypeName(RD, Entry, ""); 798 } 799 llvm::StructType *Ty = Entry; 800 801 // If this is still a forward declaration, or the LLVM type is already 802 // complete, there's nothing more to do. 803 RD = RD->getDefinition(); 804 if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque()) 805 return Ty; 806 807 // If converting this type would cause us to infinitely loop, don't do it! 808 if (!isSafeToConvert(RD, *this)) { 809 DeferredRecords.push_back(RD); 810 return Ty; 811 } 812 813 // Okay, this is a definition of a type. Compile the implementation now. 814 bool InsertResult = RecordsBeingLaidOut.insert(Key).second; 815 (void)InsertResult; 816 assert(InsertResult && "Recursively compiling a struct?"); 817 818 // Force conversion of non-virtual base classes recursively. 819 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 820 for (const auto &I : CRD->bases()) { 821 if (I.isVirtual()) continue; 822 ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl()); 823 } 824 } 825 826 // Layout fields. 827 std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty); 828 CGRecordLayouts[Key] = std::move(Layout); 829 830 // We're done laying out this struct. 831 bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult; 832 assert(EraseResult && "struct not in RecordsBeingLaidOut set?"); 833 834 // If this struct blocked a FunctionType conversion, then recompute whatever 835 // was derived from that. 836 // FIXME: This is hugely overconservative. 837 if (SkippedLayout) 838 TypeCache.clear(); 839 840 // If we're done converting the outer-most record, then convert any deferred 841 // structs as well. 842 if (RecordsBeingLaidOut.empty()) 843 while (!DeferredRecords.empty()) 844 ConvertRecordDeclType(DeferredRecords.pop_back_val()); 845 846 return Ty; 847} 848 849/// getCGRecordLayout - Return record layout info for the given record decl. 850const CGRecordLayout & 851CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { 852 const Type *Key = Context.getTagDeclType(RD).getTypePtr(); 853 854 auto I = CGRecordLayouts.find(Key); 855 if (I != CGRecordLayouts.end()) 856 return *I->second; 857 // Compute the type information. 858 ConvertRecordDeclType(RD); 859 860 // Now try again. 861 I = CGRecordLayouts.find(Key); 862 863 assert(I != CGRecordLayouts.end() && 864 "Unable to find record layout information for type"); 865 return *I->second; 866} 867 868bool CodeGenTypes::isPointerZeroInitializable(QualType T) { 869 assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type"); 870 return isZeroInitializable(T); 871} 872 873bool CodeGenTypes::isZeroInitializable(QualType T) { 874 if (T->getAs<PointerType>()) 875 return Context.getTargetNullPointerValue(T) == 0; 876 877 if (const auto *AT = Context.getAsArrayType(T)) { 878 if (isa<IncompleteArrayType>(AT)) 879 return true; 880 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 881 if (Context.getConstantArrayElementCount(CAT) == 0) 882 return true; 883 T = Context.getBaseElementType(T); 884 } 885 886 // Records are non-zero-initializable if they contain any 887 // non-zero-initializable subobjects. 888 if (const RecordType *RT = T->getAs<RecordType>()) { 889 const RecordDecl *RD = RT->getDecl(); 890 return isZeroInitializable(RD); 891 } 892 893 // We have to ask the ABI about member pointers. 894 if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) 895 return getCXXABI().isZeroInitializable(MPT); 896 897 // Everything else is okay. 898 return true; 899} 900 901bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) { 902 return getCGRecordLayout(RD).isZeroInitializable(); 903} 904