CGCall.cpp revision 292735
//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
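  // For example, the K&R-style C declaration
  //   int f();
  // carries no prototype, so it is arranged here as if it were
  // 'int f(...)' with zero required arguments.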
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  prefix.append(FTP->param_type_begin(), FTP->param_type_end());
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
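/// For example, an ordinary method such as 'void S::f(int)' is arranged
/// with an implicit 'S *this' parameter prepended to its formal
/// parameter list.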
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  argTypes.append(FTP->param_type_begin(), FTP->param_type_end());

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
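  // For example, defining the unprototyped C function
  //   int f() { return 0; }
  // arranges a non-variadic 'int f()' here, even though calls through an
  // unprototyped type may still be laid out variadically (see
  // arrangeFreeFunctionType above).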
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
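/// The implicit argument is the block literal itself, which is why one
/// extra leading parameter is treated as required below.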
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
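  // This is the target-specific step: the ABIInfo implementation decides,
  // for the return value and each argument, whether it is passed directly
  // (possibly coerced to another type), extended, indirectly through a
  // pointer, expanded into multiple IR arguments, passed via inalloca, or
  // ignored.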
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if the record is a union,
    // only the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr =
          Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Value *RealAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(RealAddr, CExp->EltTy));
    llvm::Value *ImagAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(ImagAddr, CExp->EltTy));
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    llvm::Value *Addr = RV.getAggregateAddr();
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = RV.getAggregateAddr();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty, CharUnits SrcAlign,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
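  // As an illustration: loading a source of type { i32, i32 } (alloc size 8)
  // as an i64 on a typical 64-bit target finds SrcSize >= DstSize below, so
  // the source pointer is simply bitcast and loaded; a smaller source
  // instead goes through the memcpy path at the end, leaving the extra
  // destination bits undefined.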
  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load =
        CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
  Tmp->setAlignment(SrcAlign.getQuantity());
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           SrcAlign.getQuantity(), false);
  return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          CharUnits DestAlign) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      uint64_t EltOffset = Layout->getElementOffset(i);
      CharUnits EltAlign =
          DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
      CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
                                     DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
                                   DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
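///
/// This is the store-side counterpart of CreateCoercedLoad above: storing,
/// say, an i64 into a { i32, i32 } slot bitcasts the destination pointer
/// rather than the value.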
1006/// 1007/// This safely handles the case when the src type is larger than the 1008/// destination type; the upper bits of the src will be lost. 1009static void CreateCoercedStore(llvm::Value *Src, 1010 llvm::Value *DstPtr, 1011 bool DstIsVolatile, 1012 CharUnits DstAlign, 1013 CodeGenFunction &CGF) { 1014 llvm::Type *SrcTy = Src->getType(); 1015 llvm::Type *DstTy = 1016 cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 1017 if (SrcTy == DstTy) { 1018 CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(), 1019 DstIsVolatile); 1020 return; 1021 } 1022 1023 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1024 1025 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { 1026 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF); 1027 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 1028 } 1029 1030 // If the source and destination are integer or pointer types, just do an 1031 // extension or truncation to the desired type. 1032 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && 1033 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { 1034 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); 1035 CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(), 1036 DstIsVolatile); 1037 return; 1038 } 1039 1040 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); 1041 1042 // If store is legal, just bitcast the src pointer. 1043 if (SrcSize <= DstSize) { 1044 llvm::Value *Casted = 1045 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy)); 1046 BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign); 1047 } else { 1048 // Otherwise do coercion through memory. This is stupid, but 1049 // simple. 1050 1051 // Generally SrcSize is never greater than DstSize, since this means we are 1052 // losing bits. However, this can happen in cases where the structure has 1053 // additional padding, for example due to a user specified alignment. 1054 // 1055 // FIXME: Assert that we aren't truncating non-padding bits when have access 1056 // to that information. 1057 llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy); 1058 Tmp->setAlignment(DstAlign.getQuantity()); 1059 CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity()); 1060 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy(); 1061 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy); 1062 llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy); 1063 CGF.Builder.CreateMemCpy(DstCasted, Casted, 1064 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize), 1065 DstAlign.getQuantity(), false); 1066 } 1067} 1068 1069namespace { 1070 1071/// Encapsulates information about the way function arguments from 1072/// CGFunctionInfo should be passed to actual LLVM IR function. 1073class ClangToLLVMArgMapping { 1074 static const unsigned InvalidIndex = ~0U; 1075 unsigned InallocaArgNo; 1076 unsigned SRetArgNo; 1077 unsigned TotalIRArgs; 1078 1079 /// Arguments of LLVM IR function corresponding to single Clang argument. 1080 struct IRArgs { 1081 unsigned PaddingArgIndex; 1082 // Argument is expanded to IR arguments at positions 1083 // [FirstArgIndex, FirstArgIndex + NumberOfArgs). 
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of such arguments.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
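  // For instance, a return value given ABIArgInfo::Indirect (a large struct
  // returned by value on most targets) was lowered to a void IR return type
  // above; the result is instead written through the hidden sret pointer
  // parameter added here.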
  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
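    // These are mostly string attributes describing how the body must be
    // compiled, e.g. "no-frame-pointer-elim" or "target-features" below,
    // which have no meaning at an individual call site.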
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(CodeGenOpts.DisableTailCalls));
    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;

    // TODO: Features gets us the features on the command line including
    // feature dependencies. For canonicalization purposes we might want to
    // avoid putting features in the target-features set if we know it'll be
    // one of the default features in the backend, e.g. corei7-avx and +avx or
    // figure out non-explicit dependencies.
    // Canonicalize the existing features in a new feature map.
    // TODO: Migrate the existing backends to keep the map around rather than
    // the vector.
    llvm::StringMap<bool> FeatureMap;
    for (auto F : getTarget().getTargetOpts().Features) {
      const char *Name = F.c_str();
      bool Enabled = Name[0] == '+';
      getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
    }

    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD) {
      if (const auto *TD = FD->getAttr<TargetAttr>()) {
        StringRef FeaturesStr = TD->getFeatures();
        SmallVector<StringRef, 1> AttrFeatures;
        FeaturesStr.split(AttrFeatures, ",");

        // Grab the various features and prepend a "+" to turn on the feature
        // to the backend and add them to our existing set of features.
        for (auto &Feature : AttrFeatures) {
          // Go ahead and trim whitespace rather than either erroring or
          // accepting it weirdly.
          Feature = Feature.trim();

          // While we're here iterating, check for a different target cpu.
          if (Feature.startswith("arch="))
            TargetCPU = Feature.split("=").second.trim();
          else if (Feature.startswith("tune="))
            // We don't support cpu tuning this way currently.
            ;
          else if (Feature.startswith("fpmath="))
            // TODO: Support the fpmath option this way. It will require
            // checking overall feature validity for the function with the
            // rest of the attributes on the function.
1531 ; 1532 else if (Feature.startswith("mno-")) 1533 getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second, 1534 false); 1535 else 1536 getTarget().setFeatureEnabled(FeatureMap, Feature, true); 1537 } 1538 } 1539 } 1540 1541 // Produce the canonical string for this set of features. 1542 std::vector<std::string> Features; 1543 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(), 1544 ie = FeatureMap.end(); 1545 it != ie; ++it) 1546 Features.push_back((it->second ? "+" : "-") + it->first().str()); 1547 1548 // Now add the target-cpu and target-features to the function. 1549 if (TargetCPU != "") 1550 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1551 if (!Features.empty()) { 1552 std::sort(Features.begin(), Features.end()); 1553 FuncAttrs.addAttribute("target-features", 1554 llvm::join(Features.begin(), Features.end(), ",")); 1555 } 1556 } 1557 1558 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1559 1560 QualType RetTy = FI.getReturnType(); 1561 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1562 switch (RetAI.getKind()) { 1563 case ABIArgInfo::Extend: 1564 if (RetTy->hasSignedIntegerRepresentation()) 1565 RetAttrs.addAttribute(llvm::Attribute::SExt); 1566 else if (RetTy->hasUnsignedIntegerRepresentation()) 1567 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1568 // FALL THROUGH 1569 case ABIArgInfo::Direct: 1570 if (RetAI.getInReg()) 1571 RetAttrs.addAttribute(llvm::Attribute::InReg); 1572 break; 1573 case ABIArgInfo::Ignore: 1574 break; 1575 1576 case ABIArgInfo::InAlloca: 1577 case ABIArgInfo::Indirect: { 1578 // inalloca and sret disable readnone and readonly 1579 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1580 .removeAttribute(llvm::Attribute::ReadNone); 1581 break; 1582 } 1583 1584 case ABIArgInfo::Expand: 1585 llvm_unreachable("Invalid ABI kind for return argument"); 1586 } 1587 1588 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1589 QualType PTy = RefTy->getPointeeType(); 1590 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1591 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1592 .getQuantity()); 1593 else if (getContext().getTargetAddressSpace(PTy) == 0) 1594 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1595 } 1596 1597 // Attach return attributes. 1598 if (RetAttrs.hasAttributes()) { 1599 PAL.push_back(llvm::AttributeSet::get( 1600 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 1601 } 1602 1603 // Attach attributes to sret. 1604 if (IRFunctionArgs.hasSRetArg()) { 1605 llvm::AttrBuilder SRETAttrs; 1606 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1607 if (RetAI.getInReg()) 1608 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1609 PAL.push_back(llvm::AttributeSet::get( 1610 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1611 } 1612 1613 // Attach attributes to inalloca argument. 1614 if (IRFunctionArgs.hasInallocaArg()) { 1615 llvm::AttrBuilder Attrs; 1616 Attrs.addAttribute(llvm::Attribute::InAlloca); 1617 PAL.push_back(llvm::AttributeSet::get( 1618 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1619 } 1620 1621 unsigned ArgNo = 0; 1622 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1623 E = FI.arg_end(); 1624 I != E; ++I, ++ArgNo) { 1625 QualType ParamType = I->type; 1626 const ABIArgInfo &AI = I->info; 1627 llvm::AttrBuilder Attrs; 1628 1629 // Add attribute for padding argument, if necessary. 
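    // (Padding arguments are synthetic IR arguments whose only job is to
    // steer the following real argument into the right register or stack
    // slot; only 'inreg' is ever applied to them here.)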
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
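  // (A K&R-promoted 'float' parameter, by contrast, arrives here as a
  // 'double' and is truncated back by the FP cast below.)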
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

/// Returns the attribute (either parameter attribute or function
/// attribute) that declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  // Flattened function arguments.
  SmallVector<llvm::Argument *, 16> FnArgs;
  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  for (auto &Arg : Fn->args()) {
    FnArgs.push_back(&Arg);
  }
  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = nullptr;
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      llvm::Value *V =
          Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
                                  ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      llvm::Value *V = FnArgs[FirstIRArg];

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
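      // (Trivial means the one IR argument already has exactly the type we
      // would convert Ty to, with no offset into the coerced value.)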
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        llvm::Value *V = AI;

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()))
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                     getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }

          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr) {
            llvm::Value *AlignmentValue =
              EmitScalarExpr(AVAttr->getAlignment());
            llvm::ConstantInt *AlignmentCI =
              cast<llvm::ConstantInt>(AlignmentValue);
            unsigned Alignment =
              std::min((unsigned) AlignmentCI->getZExtValue(),
                       +llvm::Value::MaximumAlignment);

            llvm::AttrBuilder Attrs;
            Attrs.addAlignmentAttr(Alignment);
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1, Attrs));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.
      CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                         llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
        PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr =
                Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      } else {
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      }
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));

      auto FnArgIter = FnArgs.begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = FnArgs[FirstIRArg + i];
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction emitted in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call. If we can't find it, we can't do this
    // optimization. But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
             CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity. Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
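  // (This matches the pattern emitted for 'return self;': a load of the
  // 'self' alloca followed by an objc_retain of the loaded value.)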
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return nullptr;

  // Okay! Burn it all down. This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain. This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP. Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers.
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
            dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
    if (!store) return nullptr;
    if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
  if (!store) return nullptr;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateLoad(SRet, "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
                          EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = nullptr;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      CharUnits Align = getContext().getTypeAlignInChars(RetTy);
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
        V = Builder.CreateBitCast(V,
                        llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
        Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
      if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
        SanitizerScope SanScope(this);
        llvm::Value *Cond = Builder.CreateICmpNE(
            RV, llvm::Constant::getNullValue(RV->getType()));
        llvm::Constant *StaticData[] = {
            EmitCheckSourceLocation(EndLoc),
            EmitCheckSourceLocation(RetNNAttr->getLocation()),
        };
        EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
                  "nonnull_return", StaticData, None);
      }
    }
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateLoad(Placeholder);
  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca. We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
         "cannot emit delegate call arguments for inalloca arguments!");

  args.add(convertTempToRValue(local, type, loc), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
              cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of. This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value. No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
    CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
         I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
    I->IsActiveIP->eraseFromParent();
  }
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of a temporary, which is copy-initialized
/// from the source l-value if requested and written back into it after
/// the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
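  // (This matters for the classic use case, an ARC out-parameter such as
  // 'NSError **err': callers are allowed to pass a null pointer, in which
  // case there is nothing to copy into or write back from.)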
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                     llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");

  // Control gets really tied up in landing pads, so we have to spill the
  // stacksave to an alloca to avoid violating SSA form.
  // TODO: This is dead if we never emit the cleanup. We should create the
  // alloca and store lazily on the first cleanup emission.
  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
  CGF.Builder.CreateStore(StackBase, StackBaseMem);
  CGF.pushStackRestore(EHCleanup, StackBaseMem);
  StackCleanup = CGF.EHStack.getInnermostEHScope();
  assert(StackCleanup.isValid());
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    // We could load StackBase from StackBaseMem, but in the non-exceptional
    // case we can skip it.
    CGF.Builder.CreateCall(F, StackBase);
  }
}

void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          const FunctionDecl *FD,
                                          unsigned ParmNum) {
  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
    return;
  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
  if (!NNAttr)
    return;
  SanitizerScope SanScope(this);
  assert(RV.isScalar());
  llvm::Value *V = RV.getScalarVal();
  llvm::Value *Cond =
      Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc),
      EmitCheckSourceLocation(NNAttr->getLocation()),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
            "nonnull_arg", StaticData, None);
}

void CodeGenFunction::EmitCallArgs(CallArgList &Args,
                                   ArrayRef<QualType> ArgTypes,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd,
                                   const FunctionDecl *CalleeDecl,
                                   unsigned ParamsToSkip) {
  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgBeg + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                          CalleeDecl, ParamsToSkip + I);
    }

    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgBeg + I;
    assert(Arg != ArgEnd);
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                        CalleeDecl, ParamsToSkip + I);
  }
}

namespace {

struct DestroyUnpassedArg : EHScopeStack::Cleanup {
  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  llvm::Value *Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

}

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind &&
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
      llvm::Value *tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
                        L.getAlignment());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, None, Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return llvm::CallSite(Inst);
}

/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
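  // (When the return is indirect or inalloca, this pointer is the SRetPtr
  // set up just below; the callee writes the result through it instead of
  // returning it in registers.)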
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
      cast<llvm::FunctionType>(
          cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::AllocaInst *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = nullptr;
  size_t UnusedReturnSize = 0;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr) {
      SRetPtr = CreateMemTemp(RetTy);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        if (EmitLifetimeStart(size, SRetPtr))
          UnusedReturnSize = size;
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                  RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
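        // Scalars and complex values have no deferred placeholder to patch;
        // they are written directly into their field of the argument memory
        // with an ordinary initializing store.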
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is unavoidable. The
        // definition of a type later in a translation unit may change its
        // type from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = AI;

        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different from that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
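        // A typical instance: the value is an i8* while the IR signature
        // names a specific struct pointer type. Only representation-
        // preserving mismatches are expected here; anything that needed a
        // real conversion would indicate an ABI bug, not a missing bitcast.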
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      CharUnits SrcAlign;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        SrcAlign = TypeAlign;
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        SrcPtr = RV.getAggregateAddr();
        // This alignment is guaranteed by EmitCallArg.
        SrcAlign = TypeAlign;
      }

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                     llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
        SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                              SrcAlign, *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
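      // A hypothetical trigger, assuming the MSVC ABI on 32-bit x86:
      //   struct S { S(const S &); int x; };
      //   void f(int, ...);
      //   void g(S s) { f(1, s); }
      // Passing the non-trivially-copyable S by value forces an inalloca
      // call site while f's IR prototype stays variadic, so the parameter
      // lists cannot be compared slot by slot.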
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator
                 DI = DeclaredTy->element_begin(),
                 DE = DeclaredTy->element_end(),
                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
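    // In the variadic case above the callee, not the argument, was bitcast,
    // so the argument-memory type may legitimately differ from the prototype
    // parameter type; skip the exact-type assert for that one slot.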
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind) ||
      currentFunctionUsesSEHTry())
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr);

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately. Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr);
      return ret;
    }

    case ABIArgInfo::Ignore:
      // Even though the return value is ignored, we still have to construct
      // an appropriate RValue for our caller.
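      // GetUndefRValue produces an RValue of the right kind (scalar,
      // complex, or aggregate) for RetTy; this is harmless because the
      // result is never inspected.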
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();
          CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value doesn't match the expected return type, perform a
          // bitcast to coerce it. This can happen due to trivial type
          // mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();
      CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      CharUnits StoreAlign = DestAlign;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr =
            Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(StorePtr,
                       llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
        StoreAlign =
            StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}