//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the
/// given unprototyped function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
                             ArrayRef<CanQualType>(),
                             FTNP->getExtInfo(),
                             RequiredArgs(0));
}
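// For example: a call through a value of unprototyped function type, such
// as 'int (*fp)();', is arranged with zero required arguments and treated
// as variadic, so the lowered LLVM type is roughly 'i32 (...)' and the call
// site may pass any argument list it likes.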
/// Arrange the argument and result information for a value of the
/// given function type, on top of any implicit parameters already
/// stored.
static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
                                  SmallVectorImpl<CanQualType> &argTypes,
                                             CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeFunctionType(resultType, argTypes,
                                 FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeFunctionType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFunctionType(prototype);
}
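// For example: given
//
//   struct A { int f(); };
//
// the method type is arranged with the implicit 'this' pointer prepended,
// i.e. roughly as if it were a free function 'int f(A*)'.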
/// Arrange the argument and result information for a declaration
/// or definition to the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
                             RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeFunctionType(noProto->getResultType(),
                               ArrayRef<CanQualType>(),
                               noProto->getExtInfo(),
                               RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}
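// For example: an Objective-C method declared as
//
//   - (int)add:(int)x;
//
// is arranged with the two implicit arguments first, roughly as
// 'int(id self, SEL _cmd, int x)'; the receiver and selector slots are
// pushed below before the formal parameters.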
/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
                             einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
                                  const FunctionType *fnType) {
  RequiredArgs required = RequiredArgs::All;
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs());
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(0);
  }

  return arrangeFunctionCall(fnType->getResultType(), args,
                             fnType->getExtInfo(), required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionCall(QualType resultType,
                                  const CallArgList &args,
                                  const FunctionType::ExtInfo &info,
                                  RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
                             required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
                             required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
                             FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionType(CanQualType resultType,
                                  ArrayRef<CanQualType> argTypes,
                                  const FunctionType::ExtInfo &info,
                                  RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      const FieldDecl *FD = *i;
      assert(!FD->isBitField() &&
             "Cannot expand structure with bit-field members.");
      GetExpandedTypes(FD->getType(), expandedTypes);
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      AI = ExpandTypeFromArgs(FT, LV, AI);
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
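// For example: with SrcSTy = { { float }, i32 } and DstSize = 4, the routine
// above emits "coerce.dive" GEPs down to the innermost float, since each
// first element it enters is still at least DstSize bytes wide.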
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
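// For example: on a target where IntPtrTy is i64, coercing an i8* to i32
// goes i8* -> (ptrtoint) i64 -> (trunc) i32, and the reverse direction
// widens with a zero extension before the final inttoptr.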
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but simple.
    //
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
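// For example: on x86-64, 'struct S { int a, b; }' is commonly returned
// Direct with a coerce-to type of i64; CreateCoercedStore then bitcasts the
// destination S* to i64* and performs a single 8-byte store, since the
// source size does not exceed the destination size.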
/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding type to ensure proper alignment.
      if (llvm::Type *PaddingType = argAI.getPaddingType())
        argTypes.push_back(PaddingType);
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
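// For example: a C function 'struct S { double x, y; } g(struct S s)' whose
// argument is Direct with coerce-to type { double, double } is given the
// flattened LLVM signature '{ double, double } (double, double)' by
// GetFunctionType above, rather than one passing the FCA as a single value.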
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  llvm::Attributes FuncAttrs;
  llvm::Attributes RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs |= llvm::Attribute::ReturnsTwice;
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs |= llvm::Attribute::ReturnsTwice;

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadNone;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadOnly;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

  unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::Attributes Attrs;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType() ||
           ParamType->isReferenceType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attrs |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      // Increment Index if there is padding.
      Index += (AI.getPaddingType() != 0);

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attrs |= llvm::Attribute::ByVal;

      Attrs |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
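// Note the attribute indexing above: index 0 names the return value and the
// first parameter is index 1, so an sret return consumes index 1 and pushes
// the user's parameters up by one, while a Direct argument flattened from an
// N-element struct consumes N indices (Index is bumped by N-1 before the
// final increment).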
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
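// For example: in the K&R definition
//
//   char f(c) char c; { return c; }
//
// the parameter is promoted to int at the ABI level, so the incoming i32
// argument is demoted back to i8 with a trunc before it is stored into 'c'.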
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Skip the dummy padding argument.
      if (ArgI.getPaddingType())
        ++AI;

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately following the result.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // Look for an inline asm immediately preceding the call and kill it, too.
    llvm::Instruction *prev = call->getPrevNode();
    if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
      if (asmCall->getCalledValue()
            == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
        insnsToKill.push_back(prev);
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}
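// For example: a function that ends by returning a freshly retained value
//
//   %1 = call i8* @objc_retain(i8* %x)
//   (autorelease emitted at the return)
//
// is rewritten by the routine above into a single fused call to
// @objc_retainAutoreleaseReturnValue(i8* %x), with the now-dead retain and
// intervening bitcasts erased.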
/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
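// For example: for 'return x;' the prolog-emitted pattern
//
//   store i32 %x, i32* %retval
//   ...
//   %0 = load i32* %retval
//   ret i32 %0
//
// collapses in the epilog below to 'ret i32 %x' once the dominating store is
// found, and usually the %retval alloca disappears entirely as well.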
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary; the callee may store through
/// it, and after the call we copy the temporary's value back through the
/// original address.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

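// For aggregate arguments, EmitCallArg below tries to avoid materializing
// an extra temporary.  Illustratively, given
//   struct S { int x[8]; };  void g(struct S);  ...  g(s);
// the argument is a plain lvalue-to-rvalue conversion of 's', so we pass
// the address of 's' (marked NeedsCopy) and let the ABI-specific argument
// emission decide whether a copy is actually required.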
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

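// Illustrative note: inside a C++ 'try' block getInvokeDest() returns the
// current landing pad, so EmitCallOrInvoke above emits an 'invoke' whose
// unwind edge branches there; outside of any EH scope it emits a plain
// 'call'.
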
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

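// ExpandTypeToArgs flattens an aggregate into a sequence of scalar
// arguments.  As an illustration, a struct such as
//   struct Point { float x, y; };
// lowered with ABIArgInfo::Expand is passed as two separate float
// arguments rather than as a single aggregate value.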
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (EltTy->isAnyComplexType())
        // FIXME: Volatile?
        EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = LV.asAggregateRValue();
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    llvm::Value *Addr = RV.getAggregateAddr();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      RValue FldRV;
      if (FT->isAnyComplexType())
        // FIXME: Volatile?
        FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
      else if (CodeGenFunction::hasAggregateLLVMType(FT))
        FldRV = LV.asAggregateRValue();
      else
        FldRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}


RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

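    // Direct and Extend arguments may be coerced to a different IR type.
    // Illustratively, on x86-64 a struct of two doubles can be coerced to
    // the first-class aggregate type { double, double }, which the code
    // below then flattens so each element travels as its own scalar.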
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Insert a padding argument to ensure proper alignment.
      if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
        Args.push_back(llvm::UndefValue::get(PaddingType));
        ++IRArgNo;
      }

      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
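  // Illustratively, with a K&R-style declaration:
  //   void foo();           /* unprototyped */
  //   ...  foo(42);
  // the call target is a bitcast of @foo to a variadic function type; if
  // the emitted arguments line up with @foo's actual parameter types, the
  // cast can be stripped and @foo called directly.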
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

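  // Illustratively, for a callee returning
  //   struct Big { int a[16]; };
  // the return is ABIArgInfo::Indirect: Args[0] already holds the hidden
  // sret slot, and the return-value switch below simply repackages that
  // slot as this call's RValue.
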
  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}

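// Illustrative note: a 'va_arg(ap, int)' in the source reaches EmitVAArg
// above, which delegates to the target's ABIInfo so that each target can
// step the va_list according to its own calling convention.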