CGCall.cpp revision 251662
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for CC_X86Pascal to LLVM.
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 None, FTNP->getExtInfo(), RequiredArgs(0));
}
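
// Editor's note (illustrative, not part of the original source): for a
// K&R-style declaration such as
//
//   int f();          // unprototyped: no parameter information
//
// the type is a FunctionNoProtoType, so the arrangement above uses
// RequiredArgs(0) - at the type level, every argument supplied at a call
// site is treated as a variadic argument subject to the default argument
// promotions.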
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored. Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                              FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}

/// Arrange the argument and result information for a C++ method type
/// (i.e. a non-static member function type), on top of any implicit
/// parameters already stored in the prefix.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  return CC_C;
}
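
// Editor's note (illustrative): this is the attribute-to-CC mapping used
// for declarations such as
//
//   void __attribute__((stdcall)) g(int);   // -> CC_X86StdCall
//
// which ClangCallConvToLLVMCallConv above then lowers to
// llvm::CallingConv::X86_StdCall in the emitted IR.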
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant. It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(!FTP->isVariadic() && "variadic dtor");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}
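
// Editor's note (illustrative): for a constructor such as
//
//   struct S { S(int); };
//
// the arranged signature is conceptually void(S*, int), with 'this' first;
// BuildConstructorSignature may append ABI-specific implicit arguments
// (for example, a VTT pointer for some variants in the Itanium C++ ABI).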
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}
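
// Editor's note (illustrative): an Objective-C method such as
//
//   - (int)addX:(int)x;
//
// is arranged by arrangeObjCMessageSendSignature above as int(self, _cmd, x):
// the receiver and selector are prepended as two implicit parameters before
// the formal parameters.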
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGT.CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}
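
// Editor's note (illustrative): for a call to a variadic prototype such as
//
//   int printf(const char *, ...);
//   printf("%d-%d", a, b);
//
// arrangeFreeFunctionLikeCall above computes RequiredArgs(1): only the
// format string is a required (prototyped) argument, and a and b are
// lowered under the variadic rules.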
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
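
// Editor's note (illustrative): CGFunctionInfo is memoized in a FoldingSet
// keyed on (ext-info, required args, result type, argument types), and the
// per-argument ArgInfo array is co-allocated in the same heap block as the
// node itself - getArgsBuffer() simply indexes past the object, with slot 0
// reserved for the return type.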
/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}
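
// Editor's note (illustrative): under the Expand ABI kind, an aggregate is
// flattened member by member. For example,
//
//   struct P { int a; float b[2]; _Complex double c; };
//
// expands to the type list {i32, float, float, double, double}: arrays are
// unrolled, and a _Complex contributes its element type twice.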
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to GEP into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
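
// Editor's note (illustrative): CoerceIntOrPtrToIntOrPtr routes everything
// through integers except the pointer-to-pointer case, e.g.
//
//   i32 -> i64   : zext (the 'false' above requests an unsigned cast)
//   i8* -> i64   : ptrtoint
//   i64 -> i8*   : cast to the pointer width if needed, then inttoptr
//   i8* -> i32*  : single bitcast, no integer round-trip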
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}
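
// Editor's note (illustrative): CreateCoercedLoad handles, e.g., an x86-64
// style coercion where a small struct in memory must be produced as the
// scalar the ABI passes it in:
//
//   %struct.S = type { i32, i32 }   ; in memory
//   coerced load type: i64          ; passed in one register
//
// Here SrcSize == DstSize, so the bitcast-and-load path is taken; only a
// genuinely smaller source falls back to the memcpy-through-temporary path.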
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}
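
// Editor's note (illustrative): CreateCoercedStore is the mirror image of
// CreateCoercedLoad: storing an incoming i64 argument into an alloca of
// %struct.S = type { i32, i32 } takes the bitcast path (SrcSize <= DstSize)
// and is emitted as a single scalar store through a retyped pointer; when
// the source is a first-class aggregate such as { i64, i32 }, BuildAggStore
// splits it into per-element stores instead.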
/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}
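
// Editor's note (illustrative): the fpret/fp2ret queries exist for
// Objective-C message sends, where the forwarding entry point depends on
// the return type; e.g. on 32-bit x86 Darwin a float-returning message
// send goes through objc_msgSend_fpret because the result comes back on
// the x87 floating-point stack.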
llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
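
// Editor's note (illustrative): putting the pieces above together, a C
// function such as
//
//   struct Big { char buf[64]; };
//   struct Big make(int n);
//
// is typically given an Indirect (sret) result and lowered to an LLVM type
// like void(%struct.Big*, i32), while a Direct coerce-to struct is
// flattened into consecutive scalar parameters.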
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           CodeGenOpts.LessPreciseFPMAD ? "true" : "false");
    FuncAttrs.addAttribute("no-infs-fp-math",
                           CodeGenOpts.NoInfsFPMath ? "true" : "false");
    FuncAttrs.addAttribute("no-nans-fp-math",
                           CodeGenOpts.NoNaNsFPMath ? "true" : "false");
    FuncAttrs.addAttribute("unsafe-fp-math",
                           CodeGenOpts.UnsafeFPMath ? "true" : "false");
    FuncAttrs.addAttribute("use-soft-float",
                           CodeGenOpts.SoftFloat ? "true" : "false");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, SRETAttrs));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                          llvm::AttributeSet::ReturnIndex,
                                          RetAttrs));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements() - 1; // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                          llvm::AttributeSet::FunctionIndex,
                                          FuncAttrs));
}
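
// Editor's note (illustrative): for a declaration such as
//
//   short h(short x) __attribute__((const));
//
// the attribute list built above would typically carry signext on both the
// return value and x (ABIArgInfo::Extend on a signed sub-register type),
// plus readnone and nounwind on the function itself.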
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
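
// Editor's note (illustrative): K&R definitions need this demotion because
// the caller applies the default argument promotions:
//
//   void k(f) float f; { ... }   // ABI-wise, f arrives as a double
//
// so the prolog receives a double and emitArgumentDemotion converts it
// back (here via an fptrunc) to the declared float type.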
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                        AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }
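
      // Editor's note (illustrative): from here on we are in the coerced
      // case, e.g. an x86-64 parameter of type struct { double x, y; }
      // arriving as two separate double arguments; the alloca below gives
      // it a home in memory so it can be reassembled element by element.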

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                        llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The result must be the last instruction in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call. If we can't find it, we can't do this
    // optimization. But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity. Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay! Burn it all down. This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}
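
// Editor's note (illustrative): the peephole above targets the pattern ARC
// emits for 'return self;' in a method with an immutable self:
//
//   %self = load i8** %self.addr
//   %0 = call i8* @objc_retain(i8* %self)   ; the +1 the return would autorelease
//
// When the retained value is an ordinary load of a const 'self', the retain
// (and the autorelease that would otherwise follow) can both be dropped.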
1563   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1564     return self;
1565
1566   // At -O0, try to emit a fused retain/autorelease.
1567   if (CGF.shouldUseFusedARCCalls())
1568     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1569       return fused;
1570
1571   return CGF.EmitARCAutoreleaseReturnValue(result);
1572 }
1573
1574 /// Heuristically search for a dominating store to the return-value slot.
1575 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1576   // If there are multiple uses of the return-value slot, just check
1577   // for something immediately preceding the IP. Sometimes this can
1578   // happen with how we generate implicit-returns; it can also happen
1579   // with noreturn cleanups.
1580   if (!CGF.ReturnValue->hasOneUse()) {
1581     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1582     if (IP->empty()) return 0;
1583     llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1584     if (!store) return 0;
1585     if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1586     assert(!store->isAtomic() && !store->isVolatile()); // see below
1587     return store;
1588   }
1589
1590   llvm::StoreInst *store =
1591     dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1592   if (!store) return 0;
1593
1594   // These aren't actually possible for non-coerced returns, and we
1595   // only care about non-coerced returns on this code path.
1596   assert(!store->isAtomic() && !store->isVolatile());
1597
1598   // Now do a quick-and-dirty dominance check: just walk up the
1599   // single-predecessors chain from the current insertion point.
1600   llvm::BasicBlock *StoreBB = store->getParent();
1601   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1602   while (IP != StoreBB) {
1603     if (!(IP = IP->getSinglePredecessor()))
1604       return 0;
1605   }
1606
1607   // Okay, the store's basic block dominates the insertion point; we
1608   // can do our thing.
1609   return store;
1610 }
1611
1612 /// Check whether the 'this' argument of a callsite matches the caller's 'this'.
1613 static bool checkThisPointer(llvm::Value *ThisArg, llvm::Value *This) {
1614   if (ThisArg == This)
1615     return true;
1616   // Check whether ThisArg is a bitcast of This.
1617   llvm::BitCastInst *Bitcast;
1618   if ((Bitcast = dyn_cast<llvm::BitCastInst>(ThisArg)) &&
1619       Bitcast->getOperand(0) == This)
1620     return true;
1621   return false;
1622 }
1623
1624 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1625                                          bool EmitRetDbgLoc) {
1626   // Functions with no result always return void.
1627   if (ReturnValue == 0) {
1628     Builder.CreateRetVoid();
1629     return;
1630   }
1631
1632   llvm::DebugLoc RetDbgLoc;
1633   llvm::Value *RV = 0;
1634   QualType RetTy = FI.getReturnType();
1635   const ABIArgInfo &RetAI = FI.getReturnInfo();
1636
1637   switch (RetAI.getKind()) {
1638   case ABIArgInfo::Indirect: {
1639     switch (getEvaluationKind(RetTy)) {
1640     case TEK_Complex: {
1641       ComplexPairTy RT =
1642         EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
1643       EmitStoreOfComplex(RT,
1644                          MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1645                          /*isInit*/ true);
1646       break;
1647     }
1648     case TEK_Aggregate:
1649       // Do nothing; aggregates get evaluated directly into the destination.
1650       break;
1651     case TEK_Scalar:
1652       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
1653                         MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1654                         /*isInit*/ true);
1655       break;
1656     }
1657     break;
1658   }
1659
1660   case ABIArgInfo::Extend:
1661   case ABIArgInfo::Direct:
1662     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1663         RetAI.getDirectOffset() == 0) {
1664       // The internal return value temp will always have
1665       // pointer-to-return-type type; just do a load.
1666
1667       // If there is a dominating store to ReturnValue, we can elide
1668       // the load, zap the store, and usually zap the alloca.
1669       if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1670         // Reuse the debug location from the store unless we're told not to.
1671         if (EmitRetDbgLoc)
1672           RetDbgLoc = SI->getDebugLoc();
1673         // Get the stored value and nuke the now-dead store.
1674         RV = SI->getValueOperand();
1675         SI->eraseFromParent();
1676
1677         // If that was the only use of the return value, nuke it as well now.
1678         if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1679           cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1680           ReturnValue = 0;
1681         }
1682
1683       // Otherwise, we have to do a simple load.
1684       } else {
1685         RV = Builder.CreateLoad(ReturnValue);
1686       }
1687     } else {
1688       llvm::Value *V = ReturnValue;
1689       // If the value is offset in memory, apply the offset now.
1690       if (unsigned Offs = RetAI.getDirectOffset()) {
1691         V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1692         V = Builder.CreateConstGEP1_32(V, Offs);
1693         V = Builder.CreateBitCast(V,
1694                          llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1695       }
1696
1697       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1698     }
1699
1700     // In ARC, end functions that return a retainable type with a call
1701     // to objc_autoreleaseReturnValue.
1702     if (AutoreleaseResult) {
1703       assert(getLangOpts().ObjCAutoRefCount &&
1704              !FI.isReturnsRetained() &&
1705              RetTy->isObjCRetainableType());
1706       RV = emitAutoreleaseOfResult(*this, RV);
1707     }
1708
1709     break;
1710
1711   case ABIArgInfo::Ignore:
1712     break;
1713
1714   case ABIArgInfo::Expand:
1715     llvm_unreachable("Invalid ABI kind for return argument");
1716   }
1717
1718   // If this function returns 'this', the last instruction is a CallInst
1719   // that returns 'this', and the 'this' argument of that CallInst points to
1720   // the same object as CXXThisValue, then use the return value from the
1721   // CallInst. We then do not need to keep 'this' alive through the callsite,
1722   // and the backend can apply optimizations such as tail call optimization.
1723   if (CalleeWithThisReturn && CGM.getCXXABI().HasThisReturn(CurGD)) {
1724     llvm::BasicBlock *IP = Builder.GetInsertBlock();
1725     llvm::CallInst *Callsite;
1726     if (!IP->empty() && (Callsite = dyn_cast<llvm::CallInst>(&IP->back())) &&
1727         Callsite->getCalledFunction() == CalleeWithThisReturn &&
1728         checkThisPointer(Callsite->getOperand(0), CXXThisValue))
1729       RV = Builder.CreateBitCast(Callsite, RetAI.getCoerceToType());
1730   }
1731   llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1732   if (!RetDbgLoc.isUnknown())
1733     Ret->setDebugLoc(RetDbgLoc);
1734 }
1735
1736 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1737                                           const VarDecl *param) {
1738   // StartFunction converted the ABI-lowered parameter(s) into a
1739   // local alloca. We need to turn that into an r-value suitable
1740   // for EmitCall.
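  // (Delegating constructor calls are a typical client: each parameter of
  // the delegating constructor is reloaded here and forwarded on.)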
1741 llvm::Value *local = GetAddrOfLocalVar(param); 1742 1743 QualType type = param->getType(); 1744 1745 // For the most part, we just need to load the alloca, except: 1746 // 1) aggregate r-values are actually pointers to temporaries, and 1747 // 2) references to non-scalars are pointers directly to the aggregate. 1748 // I don't know why references to scalars are different here. 1749 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 1750 if (!hasScalarEvaluationKind(ref->getPointeeType())) 1751 return args.add(RValue::getAggregate(local), type); 1752 1753 // Locals which are references to scalars are represented 1754 // with allocas holding the pointer. 1755 return args.add(RValue::get(Builder.CreateLoad(local)), type); 1756 } 1757 1758 args.add(convertTempToRValue(local, type), type); 1759} 1760 1761static bool isProvablyNull(llvm::Value *addr) { 1762 return isa<llvm::ConstantPointerNull>(addr); 1763} 1764 1765static bool isProvablyNonNull(llvm::Value *addr) { 1766 return isa<llvm::AllocaInst>(addr); 1767} 1768 1769/// Emit the actual writing-back of a writeback. 1770static void emitWriteback(CodeGenFunction &CGF, 1771 const CallArgList::Writeback &writeback) { 1772 const LValue &srcLV = writeback.Source; 1773 llvm::Value *srcAddr = srcLV.getAddress(); 1774 assert(!isProvablyNull(srcAddr) && 1775 "shouldn't have writeback for provably null argument"); 1776 1777 llvm::BasicBlock *contBB = 0; 1778 1779 // If the argument wasn't provably non-null, we need to null check 1780 // before doing the store. 1781 bool provablyNonNull = isProvablyNonNull(srcAddr); 1782 if (!provablyNonNull) { 1783 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 1784 contBB = CGF.createBasicBlock("icr.done"); 1785 1786 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 1787 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 1788 CGF.EmitBlock(writebackBB); 1789 } 1790 1791 // Load the value to writeback. 1792 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 1793 1794 // Cast it back, in case we're writing an id to a Foo* or something. 1795 value = CGF.Builder.CreateBitCast(value, 1796 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 1797 "icr.writeback-cast"); 1798 1799 // Perform the writeback. 1800 1801 // If we have a "to use" value, it's something we need to emit a use 1802 // of. This has to be carefully threaded in: if it's done after the 1803 // release it's potentially undefined behavior (and the optimizer 1804 // will ignore it), and if it happens before the retain then the 1805 // optimizer could move the release there. 1806 if (writeback.ToUse) { 1807 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 1808 1809 // Retain the new value. No need to block-copy here: the block's 1810 // being passed up the stack. 1811 value = CGF.EmitARCRetainNonBlock(value); 1812 1813 // Emit the intrinsic use here. 1814 CGF.EmitARCIntrinsicUse(writeback.ToUse); 1815 1816 // Load the old value (primitively). 1817 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV); 1818 1819 // Put the new value in place (primitively). 1820 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 1821 1822 // Release the old value. 1823 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 1824 1825 // Otherwise, we can just do a normal lvalue store. 1826 } else { 1827 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 1828 } 1829 1830 // Jump to the continuation block. 
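  // (This is only needed if we emitted the null check above; otherwise
  // the store was emitted straight-line and no continuation block exists.)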
1831   if (!provablyNonNull)
1832     CGF.EmitBlock(contBB);
1833 }
1834
1835 static void emitWritebacks(CodeGenFunction &CGF,
1836                            const CallArgList &args) {
1837   for (CallArgList::writeback_iterator
1838          i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1839     emitWriteback(CGF, *i);
1840 }
1841
1842 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
1843   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
1844     if (uop->getOpcode() == UO_AddrOf)
1845       return uop->getSubExpr();
1846   return 0;
1847 }
1848
1849 /// Emit an argument that's being passed call-by-writeback. That is,
1850 /// we are passing the address of a temporary that is written back to the original l-value after the call.
1851 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1852                              const ObjCIndirectCopyRestoreExpr *CRE) {
1853   LValue srcLV;
1854
1855   // Make an optimistic effort to emit the address as an l-value.
1856   // This can fail if the argument expression is more complicated.
1857   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
1858     srcLV = CGF.EmitLValue(lvExpr);
1859
1860   // Otherwise, just emit it as a scalar.
1861   } else {
1862     llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1863
1864     QualType srcAddrType =
1865       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1866     srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
1867   }
1868   llvm::Value *srcAddr = srcLV.getAddress();
1869
1870   // The dest and src types don't necessarily match in LLVM terms
1871   // because of the crazy ObjC compatibility rules.
1872
1873   llvm::PointerType *destType =
1874     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1875
1876   // If the address is a constant null, just pass the appropriate null.
1877   if (isProvablyNull(srcAddr)) {
1878     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1879              CRE->getType());
1880     return;
1881   }
1882
1883   // Create the temporary.
1884   llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1885                                            "icr.temp");
1886   // Loading an l-value can introduce a cleanup if the l-value is __weak,
1887   // and that cleanup will be conditional if we can't prove that the l-value
1888   // isn't null, so we need to register a dominating point so that the cleanups
1889   // system will make valid IR.
1890   CodeGenFunction::ConditionalEvaluation condEval(CGF);
1891
1892   // Zero-initialize it if we're not doing a copy-initialization.
1893   bool shouldCopy = CRE->shouldCopy();
1894   if (!shouldCopy) {
1895     llvm::Value *null =
1896       llvm::ConstantPointerNull::get(
1897         cast<llvm::PointerType>(destType->getElementType()));
1898     CGF.Builder.CreateStore(null, temp);
1899   }
1900
1901   llvm::BasicBlock *contBB = 0;
1902   llvm::BasicBlock *originBB = 0;
1903
1904   // If the address is *not* known to be non-null, we need to switch.
1905   llvm::Value *finalArgument;
1906
1907   bool provablyNonNull = isProvablyNonNull(srcAddr);
1908   if (provablyNonNull) {
1909     finalArgument = temp;
1910   } else {
1911     llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1912
1913     finalArgument = CGF.Builder.CreateSelect(isNull,
1914                                    llvm::ConstantPointerNull::get(destType),
1915                                              temp, "icr.argument");
1916
1917     // If we need to copy, then the load has to be conditional, which
1918     // means we need control flow.
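    // The control flow emitted below then looks roughly like this (block
    // names as created below):
    //   origin:   %isnull = ...; br i1 %isnull, icr.cont, icr.copy
    //   icr.copy: load the source, store into %temp, br icr.cont
    //   icr.cont: pass the selected pointer as the argument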
1919 if (shouldCopy) { 1920 originBB = CGF.Builder.GetInsertBlock(); 1921 contBB = CGF.createBasicBlock("icr.cont"); 1922 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 1923 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 1924 CGF.EmitBlock(copyBB); 1925 condEval.begin(CGF); 1926 } 1927 } 1928 1929 llvm::Value *valueToUse = 0; 1930 1931 // Perform a copy if necessary. 1932 if (shouldCopy) { 1933 RValue srcRV = CGF.EmitLoadOfLValue(srcLV); 1934 assert(srcRV.isScalar()); 1935 1936 llvm::Value *src = srcRV.getScalarVal(); 1937 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 1938 "icr.cast"); 1939 1940 // Use an ordinary store, not a store-to-lvalue. 1941 CGF.Builder.CreateStore(src, temp); 1942 1943 // If optimization is enabled, and the value was held in a 1944 // __strong variable, we need to tell the optimizer that this 1945 // value has to stay alive until we're doing the store back. 1946 // This is because the temporary is effectively unretained, 1947 // and so otherwise we can violate the high-level semantics. 1948 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 1949 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 1950 valueToUse = src; 1951 } 1952 } 1953 1954 // Finish the control flow if we needed it. 1955 if (shouldCopy && !provablyNonNull) { 1956 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 1957 CGF.EmitBlock(contBB); 1958 1959 // Make a phi for the value to intrinsically use. 1960 if (valueToUse) { 1961 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 1962 "icr.to-use"); 1963 phiToUse->addIncoming(valueToUse, copyBB); 1964 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 1965 originBB); 1966 valueToUse = phiToUse; 1967 } 1968 1969 condEval.end(CGF); 1970 } 1971 1972 args.addWriteback(srcLV, temp, valueToUse); 1973 args.add(RValue::get(finalArgument), CRE->getType()); 1974} 1975 1976void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 1977 QualType type) { 1978 if (const ObjCIndirectCopyRestoreExpr *CRE 1979 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 1980 assert(getLangOpts().ObjCAutoRefCount); 1981 assert(getContext().hasSameType(E->getType(), type)); 1982 return emitWritebackArg(*this, args, CRE); 1983 } 1984 1985 assert(type->isReferenceType() == E->isGLValue() && 1986 "reference binding to unmaterialized r-value!"); 1987 1988 if (E->isGLValue()) { 1989 assert(E->getObjectKind() == OK_Ordinary); 1990 return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0), 1991 type); 1992 } 1993 1994 if (hasAggregateEvaluationKind(type) && 1995 isa<ImplicitCastExpr>(E) && 1996 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 1997 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 1998 assert(L.isSimple()); 1999 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 2000 return; 2001 } 2002 2003 args.add(EmitAnyExprToTemp(E), type); 2004} 2005 2006// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2007// optimizer it can aggressively ignore unwind edges. 2008void 2009CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 2010 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 2011 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 2012 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 2013 CGM.getNoObjCARCExceptionsMetadata()); 2014} 2015 2016/// Emits a call to the given no-arguments nounwind runtime function. 
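/// (A convenience overload: it simply forwards to the ArrayRef form below
/// with an empty argument list.)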
2017llvm::CallInst * 2018CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2019 const llvm::Twine &name) { 2020 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name); 2021} 2022 2023/// Emits a call to the given nounwind runtime function. 2024llvm::CallInst * 2025CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2026 ArrayRef<llvm::Value*> args, 2027 const llvm::Twine &name) { 2028 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 2029 call->setDoesNotThrow(); 2030 return call; 2031} 2032 2033/// Emits a simple call (never an invoke) to the given no-arguments 2034/// runtime function. 2035llvm::CallInst * 2036CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2037 const llvm::Twine &name) { 2038 return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name); 2039} 2040 2041/// Emits a simple call (never an invoke) to the given runtime 2042/// function. 2043llvm::CallInst * 2044CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2045 ArrayRef<llvm::Value*> args, 2046 const llvm::Twine &name) { 2047 llvm::CallInst *call = Builder.CreateCall(callee, args, name); 2048 call->setCallingConv(getRuntimeCC()); 2049 return call; 2050} 2051 2052/// Emits a call or invoke to the given noreturn runtime function. 2053void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 2054 ArrayRef<llvm::Value*> args) { 2055 if (getInvokeDest()) { 2056 llvm::InvokeInst *invoke = 2057 Builder.CreateInvoke(callee, 2058 getUnreachableBlock(), 2059 getInvokeDest(), 2060 args); 2061 invoke->setDoesNotReturn(); 2062 invoke->setCallingConv(getRuntimeCC()); 2063 } else { 2064 llvm::CallInst *call = Builder.CreateCall(callee, args); 2065 call->setDoesNotReturn(); 2066 call->setCallingConv(getRuntimeCC()); 2067 Builder.CreateUnreachable(); 2068 } 2069} 2070 2071/// Emits a call or invoke instruction to the given nullary runtime 2072/// function. 2073llvm::CallSite 2074CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2075 const Twine &name) { 2076 return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name); 2077} 2078 2079/// Emits a call or invoke instruction to the given runtime function. 2080llvm::CallSite 2081CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2082 ArrayRef<llvm::Value*> args, 2083 const Twine &name) { 2084 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 2085 callSite.setCallingConv(getRuntimeCC()); 2086 return callSite; 2087} 2088 2089llvm::CallSite 2090CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2091 const Twine &Name) { 2092 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name); 2093} 2094 2095/// Emits a call or invoke instruction to the given function, depending 2096/// on the current state of the EH stack. 2097llvm::CallSite 2098CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2099 ArrayRef<llvm::Value *> Args, 2100 const Twine &Name) { 2101 llvm::BasicBlock *InvokeDest = getInvokeDest(); 2102 2103 llvm::Instruction *Inst; 2104 if (!InvokeDest) 2105 Inst = Builder.CreateCall(Callee, Args, Name); 2106 else { 2107 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 2108 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); 2109 EmitBlock(ContBB); 2110 } 2111 2112 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2113 // optimizer it can aggressively ignore unwind edges. 
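  // (Concretely, AddObjCARCExceptionMetadata above attaches
  // !clang.arc.no_objc_arc_exceptions metadata to the instruction.)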
2114 if (CGM.getLangOpts().ObjCAutoRefCount) 2115 AddObjCARCExceptionMetadata(Inst); 2116 2117 return Inst; 2118} 2119 2120static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo, 2121 llvm::FunctionType *FTy) { 2122 if (ArgNo < FTy->getNumParams()) 2123 assert(Elt->getType() == FTy->getParamType(ArgNo)); 2124 else 2125 assert(FTy->isVarArg()); 2126 ++ArgNo; 2127} 2128 2129void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV, 2130 SmallVector<llvm::Value*,16> &Args, 2131 llvm::FunctionType *IRFuncTy) { 2132 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2133 unsigned NumElts = AT->getSize().getZExtValue(); 2134 QualType EltTy = AT->getElementType(); 2135 llvm::Value *Addr = RV.getAggregateAddr(); 2136 for (unsigned Elt = 0; Elt < NumElts; ++Elt) { 2137 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt); 2138 RValue EltRV = convertTempToRValue(EltAddr, EltTy); 2139 ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy); 2140 } 2141 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2142 RecordDecl *RD = RT->getDecl(); 2143 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion"); 2144 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty); 2145 2146 if (RD->isUnion()) { 2147 const FieldDecl *LargestFD = 0; 2148 CharUnits UnionSize = CharUnits::Zero(); 2149 2150 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2151 i != e; ++i) { 2152 const FieldDecl *FD = *i; 2153 assert(!FD->isBitField() && 2154 "Cannot expand structure with bit-field members."); 2155 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType()); 2156 if (UnionSize < FieldSize) { 2157 UnionSize = FieldSize; 2158 LargestFD = FD; 2159 } 2160 } 2161 if (LargestFD) { 2162 RValue FldRV = EmitRValueForField(LV, LargestFD); 2163 ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy); 2164 } 2165 } else { 2166 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2167 i != e; ++i) { 2168 FieldDecl *FD = *i; 2169 2170 RValue FldRV = EmitRValueForField(LV, FD); 2171 ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy); 2172 } 2173 } 2174 } else if (Ty->isAnyComplexType()) { 2175 ComplexPairTy CV = RV.getComplexVal(); 2176 Args.push_back(CV.first); 2177 Args.push_back(CV.second); 2178 } else { 2179 assert(RV.isScalar() && 2180 "Unexpected non-scalar rvalue during struct expansion."); 2181 2182 // Insert a bitcast as needed. 2183 llvm::Value *V = RV.getScalarVal(); 2184 if (Args.size() < IRFuncTy->getNumParams() && 2185 V->getType() != IRFuncTy->getParamType(Args.size())) 2186 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size())); 2187 2188 Args.push_back(V); 2189 } 2190} 2191 2192 2193RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 2194 llvm::Value *Callee, 2195 ReturnValueSlot ReturnValue, 2196 const CallArgList &CallArgs, 2197 const Decl *TargetDecl, 2198 llvm::Instruction **callOrInvoke) { 2199 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 2200 SmallVector<llvm::Value*, 16> Args; 2201 2202 // Handle struct-return functions by passing a pointer to the 2203 // location that we would like to return into. 2204 QualType RetTy = CallInfo.getReturnType(); 2205 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 2206 2207 // IRArgNo - Keep track of the argument number in the callee we're looking at. 
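  // (The sret slot, padding arguments, and expanded aggregates all consume
  // IR parameters, so this index can run ahead of the clang-level argument
  // number.)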
2208 unsigned IRArgNo = 0; 2209 llvm::FunctionType *IRFuncTy = 2210 cast<llvm::FunctionType>( 2211 cast<llvm::PointerType>(Callee->getType())->getElementType()); 2212 2213 // If the call returns a temporary with struct return, create a temporary 2214 // alloca to hold the result, unless one is given to us. 2215 if (CGM.ReturnTypeUsesSRet(CallInfo)) { 2216 llvm::Value *Value = ReturnValue.getValue(); 2217 if (!Value) 2218 Value = CreateMemTemp(RetTy); 2219 Args.push_back(Value); 2220 checkArgMatches(Value, IRArgNo, IRFuncTy); 2221 } 2222 2223 assert(CallInfo.arg_size() == CallArgs.size() && 2224 "Mismatch between function signature & arguments."); 2225 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 2226 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 2227 I != E; ++I, ++info_it) { 2228 const ABIArgInfo &ArgInfo = info_it->info; 2229 RValue RV = I->RV; 2230 2231 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty); 2232 2233 // Insert a padding argument to ensure proper alignment. 2234 if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) { 2235 Args.push_back(llvm::UndefValue::get(PaddingType)); 2236 ++IRArgNo; 2237 } 2238 2239 switch (ArgInfo.getKind()) { 2240 case ABIArgInfo::Indirect: { 2241 if (RV.isScalar() || RV.isComplex()) { 2242 // Make a temporary alloca to pass the argument. 2243 llvm::AllocaInst *AI = CreateMemTemp(I->Ty); 2244 if (ArgInfo.getIndirectAlign() > AI->getAlignment()) 2245 AI->setAlignment(ArgInfo.getIndirectAlign()); 2246 Args.push_back(AI); 2247 2248 LValue argLV = 2249 MakeAddrLValue(Args.back(), I->Ty, TypeAlign); 2250 2251 if (RV.isScalar()) 2252 EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true); 2253 else 2254 EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true); 2255 2256 // Validate argument match. 2257 checkArgMatches(AI, IRArgNo, IRFuncTy); 2258 } else { 2259 // We want to avoid creating an unnecessary temporary+copy here; 2260 // however, we need one in three cases: 2261 // 1. If the argument is not byval, and we are required to copy the 2262 // source. (This case doesn't occur on any common architecture.) 2263 // 2. If the argument is byval, RV is not sufficiently aligned, and 2264 // we cannot force it to be sufficiently aligned. 2265 // 3. If the argument is byval, but RV is located in an address space 2266 // different than that of the argument (0). 2267 llvm::Value *Addr = RV.getAggregateAddr(); 2268 unsigned Align = ArgInfo.getIndirectAlign(); 2269 const llvm::DataLayout *TD = &CGM.getDataLayout(); 2270 const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace(); 2271 const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ? 2272 IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0); 2273 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 2274 (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align && 2275 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) || 2276 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 2277 // Create an aligned temporary, and copy to it. 2278 llvm::AllocaInst *AI = CreateMemTemp(I->Ty); 2279 if (Align > AI->getAlignment()) 2280 AI->setAlignment(Align); 2281 Args.push_back(AI); 2282 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 2283 2284 // Validate argument match. 2285 checkArgMatches(AI, IRArgNo, IRFuncTy); 2286 } else { 2287 // Skip the extra memcpy call. 2288 Args.push_back(Addr); 2289 2290 // Validate argument match. 
2291           checkArgMatches(Addr, IRArgNo, IRFuncTy);
2292         }
2293       }
2294       break;
2295     }
2296
2297     case ABIArgInfo::Ignore:
2298       break;
2299
2300     case ABIArgInfo::Extend:
2301     case ABIArgInfo::Direct: {
2302       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2303           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2304           ArgInfo.getDirectOffset() == 0) {
2305         llvm::Value *V;
2306         if (RV.isScalar())
2307           V = RV.getScalarVal();
2308         else
2309           V = Builder.CreateLoad(RV.getAggregateAddr());
2310
2311         // If the argument doesn't match, perform a bitcast to coerce it. This
2312         // can happen due to trivial type mismatches.
2313         if (IRArgNo < IRFuncTy->getNumParams() &&
2314             V->getType() != IRFuncTy->getParamType(IRArgNo))
2315           V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2316         Args.push_back(V);
2317
2318         checkArgMatches(V, IRArgNo, IRFuncTy);
2319         break;
2320       }
2321
2322       // FIXME: Avoid the conversion through memory if possible.
2323       llvm::Value *SrcPtr;
2324       if (RV.isScalar() || RV.isComplex()) {
2325         SrcPtr = CreateMemTemp(I->Ty, "coerce");
2326         LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
2327         if (RV.isScalar()) {
2328           EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
2329         } else {
2330           EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
2331         }
2332       } else
2333         SrcPtr = RV.getAggregateAddr();
2334
2335       // If the value is offset in memory, apply the offset now.
2336       if (unsigned Offs = ArgInfo.getDirectOffset()) {
2337         SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2338         SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2339         SrcPtr = Builder.CreateBitCast(SrcPtr,
2340                        llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2341
2342       }
2343
2344       // If the coerce-to type is a first class aggregate, we flatten it and
2345       // pass the elements. Either way is semantically identical, but fast-isel
2346       // and the optimizer generally like scalar values better than FCAs.
2347       if (llvm::StructType *STy =
2348             dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2349         llvm::Type *SrcTy =
2350           cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2351         uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2352         uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2353
2354         // If the source type is smaller than the destination type of the
2355         // coerce-to logic, copy the source value into a temp alloca the size
2356         // of the destination type to allow loading all of it. The bits past
2357         // the source value are left undef.
2358         if (SrcSize < DstSize) {
2359           llvm::AllocaInst *TempAlloca
2360             = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2361           Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2362           SrcPtr = TempAlloca;
2363         } else {
2364           SrcPtr = Builder.CreateBitCast(SrcPtr,
2365                                          llvm::PointerType::getUnqual(STy));
2366         }
2367
2368         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2369           llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2370           llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2371           // We don't know what we're loading from.
2372           LI->setAlignment(1);
2373           Args.push_back(LI);
2374
2375           // Validate argument match.
2376           checkArgMatches(LI, IRArgNo, IRFuncTy);
2377         }
2378       } else {
2379         // In the simple case, just pass the coerced loaded value.
2380         Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2381                                          *this));
2382
2383         // Validate argument match.
2384         checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2385       }
2386
2387       break;
2388     }
2389
2390     case ABIArgInfo::Expand:
2391       ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2392       IRArgNo = Args.size();
2393       break;
2394     }
2395   }
2396
2397   // If the callee is a bitcast of a function to a varargs pointer-to-function
2398   // type, check to see if we can remove the bitcast. This handles some cases
2399   // with unprototyped functions.
2400   if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2401     if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2402       llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
2403       llvm::FunctionType *CurFT =
2404         cast<llvm::FunctionType>(CurPT->getElementType());
2405       llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2406
2407       if (CE->getOpcode() == llvm::Instruction::BitCast &&
2408           ActualFT->getReturnType() == CurFT->getReturnType() &&
2409           ActualFT->getNumParams() == CurFT->getNumParams() &&
2410           ActualFT->getNumParams() == Args.size() &&
2411           (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2412         bool ArgsMatch = true;
2413         for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2414           if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2415             ArgsMatch = false;
2416             break;
2417           }
2418
2419         // Strip the cast if we can get away with it. This is a nice cleanup,
2420         // but also allows us to inline the function at -O0 if it is marked
2421         // always_inline.
2422         if (ArgsMatch)
2423           Callee = CalleeF;
2424       }
2425     }
2426
2427   unsigned CallingConv;
2428   CodeGen::AttributeListType AttributeList;
2429   CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
2430                              CallingConv, true);
2431   llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2432                                                      AttributeList);
2433
2434   llvm::BasicBlock *InvokeDest = 0;
2435   if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2436                           llvm::Attribute::NoUnwind))
2437     InvokeDest = getInvokeDest();
2438
2439   llvm::CallSite CS;
2440   if (!InvokeDest) {
2441     CS = Builder.CreateCall(Callee, Args);
2442   } else {
2443     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2444     CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2445     EmitBlock(Cont);
2446   }
2447   if (callOrInvoke)
2448     *callOrInvoke = CS.getInstruction();
2449
2450   CS.setAttributes(Attrs);
2451   CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2452
2453   // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2454   // optimizer it can aggressively ignore unwind edges.
2455   if (CGM.getLangOpts().ObjCAutoRefCount)
2456     AddObjCARCExceptionMetadata(CS.getInstruction());
2457
2458   // If the call doesn't return, finish the basic block and clear the
2459   // insertion point; this allows the rest of IRgen to discard
2460   // unreachable code.
2461   if (CS.doesNotReturn()) {
2462     Builder.CreateUnreachable();
2463     Builder.ClearInsertionPoint();
2464
2465     // FIXME: For now, emit a dummy basic block because expr emitters
2466     // generally are not ready to handle emitting expressions at unreachable
2467     // points.
2468     EnsureInsertPoint();
2469
2470     // Return a reasonable RValue.
2471     return GetUndefRValue(RetTy);
2472   }
2473
2474   llvm::Instruction *CI = CS.getInstruction();
2475   if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2476     CI->setName("call");
2477
2478   // Emit any writebacks immediately. Arguably this should happen
2479   // after any return-value munging.
2480   if (CallArgs.hasWritebacks())
2481     emitWritebacks(*this, CallArgs);
2482
2483   switch (RetAI.getKind()) {
2484   case ABIArgInfo::Indirect:
2485     return convertTempToRValue(Args[0], RetTy);
2486
2487   case ABIArgInfo::Ignore:
2488     // Even though the call's result is being ignored, make sure to
2489     // construct the appropriate return value for our caller.
2490     return GetUndefRValue(RetTy);
2491
2492   case ABIArgInfo::Extend:
2493   case ABIArgInfo::Direct: {
2494     llvm::Type *RetIRTy = ConvertType(RetTy);
2495     if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2496       switch (getEvaluationKind(RetTy)) {
2497       case TEK_Complex: {
2498         llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2499         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2500         return RValue::getComplex(std::make_pair(Real, Imag));
2501       }
2502       case TEK_Aggregate: {
2503         llvm::Value *DestPtr = ReturnValue.getValue();
2504         bool DestIsVolatile = ReturnValue.isVolatile();
2505
2506         if (!DestPtr) {
2507           DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2508           DestIsVolatile = false;
2509         }
2510         BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2511         return RValue::getAggregate(DestPtr);
2512       }
2513       case TEK_Scalar: {
2514         // If the result doesn't match, perform a bitcast to coerce it. This
2515         // can happen due to trivial type mismatches.
2516         llvm::Value *V = CI;
2517         if (V->getType() != RetIRTy)
2518           V = Builder.CreateBitCast(V, RetIRTy);
2519         return RValue::get(V);
2520       }
2521       }
2522       llvm_unreachable("bad evaluation kind");
2523     }
2524
2525     llvm::Value *DestPtr = ReturnValue.getValue();
2526     bool DestIsVolatile = ReturnValue.isVolatile();
2527
2528     if (!DestPtr) {
2529       DestPtr = CreateMemTemp(RetTy, "coerce");
2530       DestIsVolatile = false;
2531     }
2532
2533     // If the value is offset in memory, apply the offset now.
2534     llvm::Value *StorePtr = DestPtr;
2535     if (unsigned Offs = RetAI.getDirectOffset()) {
2536       StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2537       StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2538       StorePtr = Builder.CreateBitCast(StorePtr,
2539                        llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2540     }
2541     CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2542
2543     return convertTempToRValue(DestPtr, RetTy);
2544   }
2545
2546   case ABIArgInfo::Expand:
2547     llvm_unreachable("Invalid ABI kind for return argument");
2548   }
2549
2550   llvm_unreachable("Unhandled ABIArgInfo::Kind");
2551 }
2552
2553 /* VarArg handling */
2554
2555 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2556   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2557 }
2558
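// For illustration (hypothetical source), a use such as:
//   int i = va_arg(ap, int);
// reaches EmitVAArg with VAListAddr pointing at 'ap'; how the value is
// actually extracted from the va_list is entirely up to the target's
// ABIInfo implementation.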