//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

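// For illustration (hypothetical declaration, editor's sketch): given
//
//   struct S { int get() const; };
//
// GetThisType(Context, RD) yields 'S *' rather than 'const S *', since the
// method's cv-qualification is deliberately ignored here, and
// GetFormalType(MD) returns the canonical 'int () const' prototype.
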
/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                                 None, FTNP->getExtInfo(), RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.  Use the
/// given ExtInfo instead of the ExtInfo from the function type.
static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
                                       SmallVectorImpl<CanQualType> &prefix,
                                             CanQual<FunctionProtoType> FTP,
                                              FunctionType::ExtInfo extInfo) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    prefix.push_back(FTP->getArgType(i));
  CanQualType resultType = FTP->getResultType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
}

/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}

/// Given the formal ext-info of a C++ instance method, adjust it
/// according to the C++ ABI in effect.
static void adjustCXXMethodInfo(CodeGenTypes &CGT,
                                FunctionType::ExtInfo &extInfo,
                                bool isVariadic) {
  if (extInfo.getCC() == CC_Default) {
    CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
    extInfo = extInfo.withCallingConv(CC);
  }
}

/// Arrange the argument and result information for a C++ instance method
/// of the given type, on top of any implicit parameters already stored.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &prefix,
                                            CanQual<FunctionProtoType> FTP) {
  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}

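// For illustration: RequiredArgs records how many leading arguments are
// fixed rather than variadic.  A non-variadic prototype yields
// RequiredArgs::All; a variadic one such as
//
//   int printf(const char *, ...);      // required = 1
//
// fixes only its declared parameters; and the unprototyped case above uses
// RequiredArgs(0), treating every argument as variadic.
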
/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeFreeFunctionType(*this, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(GetThisType(Context, RD));

  return ::arrangeCXXMethodType(*this, argTypes,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition of the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    argTypes.push_back(FTP->getArgType(i));

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant.  It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));
  CanQualType resultType = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
  assert(FTP->isVariadic() == 0 && "variadic dtor");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  adjustCXXMethodInfo(*this, extInfo, false);
  return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
    (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
                                 einfo, required);
}

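// For illustration (hypothetical method): a send to '- (void)setValue:(int)v'
// is arranged like a call to 'void f(id self, SEL _cmd, int v)' -- the
// receiver and selector become the two leading arguments pushed above.
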
const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGT.CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
}

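// For illustration (sketch of the block convention): a block of type
// 'void (^)(int)' is invoked through a function shaped roughly like
// 'void invoke(BlockLiteral *block, int x)'; the hidden leading block
// pointer is the one extra required argument accounted for by the '1'
// passed above.
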
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (CallArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType(i->Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  adjustCXXMethodInfo(*this, info, FPT->isVariadic());
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
                                 argTypes, info, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
                                         const FunctionArgList &args,
                                         const FunctionType::ExtInfo &info,
                                         bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
       i != e; ++i)
    argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));

  RequiredArgs required =
    (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
                                 required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);

  void *insertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

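// For illustration: the Expand ABI kind implemented below decomposes an
// argument into a flat list of scalars, e.g. (hypothetical types)
//
//   struct Point { int x; float y; };   ->  (i32, float)
//   _Complex double                     ->  (double, double)
//
// Constant arrays expand element by element; unions expand as their
// largest field.
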
void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        assert(!i->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(i->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i) {
        FieldDecl *FD = *i;
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

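// For illustration: asking for 8 bytes out of a pointer to the LLVM type
// '{ { i64, i8 }, i32 }' dives through two "coerce.dive" GEPs down to the
// inner i64, because each enclosing first element is still at least 8
// bytes wide.
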
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

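// For illustration: storing a first-class aggregate of type '{ i32, i32 }'
// through BuildAggStore emits roughly
//
//   store i32 %elt0, i32* %dst0
//   store i32 %elt1, i32* %dst1
//
// (hypothetical value names) rather than one aggregate store.
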
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

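  // For illustration (hypothetical declaration): with an Indirect return,
  //
  //   struct Big f(int n);
  //
  // is lowered to roughly 'void @f(%struct.Big* sret, i32)': the result slot
  // becomes a hidden leading pointer argument pushed above, and the formal
  // parameters are appended below.
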
  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

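// For illustration (hypothetical declaration): for
//
//   __attribute__((noreturn)) void fatal(const char *msg);
//
// ConstructAttributeList below adds the LLVM 'noreturn' (and, for nothrow
// functions, 'nounwind') function attributes; return and parameter
// attributes such as sext/zext, sret, byval, and inreg are derived from
// the ABIArgInfo kinds computed earlier.
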
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           CodeGenOpts.LessPreciseFPMAD ? "true" : "false");
    FuncAttrs.addAttribute("no-infs-fp-math",
                           CodeGenOpts.NoInfsFPMath ? "true" : "false");
    FuncAttrs.addAttribute("no-nans-fp-math",
                           CodeGenOpts.NoNaNsFPMath ? "true" : "false");
    FuncAttrs.addAttribute("unsafe-fp-math",
                           CodeGenOpts.UnsafeFPMath ? "true" : "false");
    FuncAttrs.addAttribute("use-soft-float",
                           CodeGenOpts.SoftFloat ? "true" : "false");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(), Index, SRETAttrs));

    ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
      .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::ReturnIndex,
                                    RetAttrs));

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    llvm::AttrBuilder Attrs;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }
  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

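// For illustration (hypothetical K&R definition):
//
//   float f(x) float x; { ... }
//
// The caller promotes 'x' to double, so it arrives as a 'double' value and
// isKNRPromoted is set; emitArgumentDemotion converts it back to 'float'
// with an FPCast named "arg.unpromote" before the parameter is emitted.
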
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type *LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant *Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                        AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All
        // we need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst, Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(), false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
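        // (For a K&R-promoted parameter this yields the promoted type, e.g.
        // double for a parameter declared float; emitArgumentDemotion below
        // truncates it back to the declared type.)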
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type.  Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                    (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                        llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but
      // fast-isel and the optimizer generally like scalar values better
      // than FCAs.
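      // (For example, on x86-64 a 16-byte POD struct is typically coerced to
      // { i64, i64 } and arrives as two scalar arguments, which are stored
      // back element-by-element below.)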
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (STy && STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
          cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
            CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            assert(AI != Fn->arg_end() && "Argument mismatch!");
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
            Builder.CreateStore(AI++, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
      EmitParmDecl(*Arg, Alloca, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
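      // (An ignored argument, typically an empty record, has no LLVM
      // parameter at all, but the source-level variable still needs either
      // storage or a placeholder value.)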
      if (!hasScalarEvaluationKind(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately following the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*, 4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
               CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
    dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return 0;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return 0;

  // Look for a retain call.
  llvm::CallInst *retainCall =
    dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return 0;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
    dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return 0;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
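  // The pattern being removed is, roughly:
  //   %self     = load i8** %self.addr
  //   %retained = call i8* @objc_retain(i8* %self)
  //   ...return %retained...
  // Returning the bare load instead lets the retain (and the matching
  // autorelease that would otherwise be emitted) disappear entirely.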
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return 0;
    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
    if (!store) return 0;
    if (store->getPointerOperand() != CGF.ReturnValue) return 0;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
    dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
  if (!store) return 0;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessor chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return 0;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

/// Check whether the 'this' argument of a callsite matches the 'this' of the
/// caller.
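/// A bitcast of 'this' also counts, since codegen routinely inserts no-op
/// pointer casts around it.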
static bool checkThisPointer(llvm::Value *ThisArg, llvm::Value *This) {
  if (ThisArg == This)
    return true;
  // Check whether ThisArg is a bitcast of This.
  llvm::BitCastInst *Bitcast;
  if ((Bitcast = dyn_cast<llvm::BitCastInst>(ThisArg)) &&
      Bitcast->getOperand(0) == This)
    return true;
  return false;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
        EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
      EmitStoreOfComplex(RT,
                         MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless we're told not to.
        if (EmitRetDbgLoc)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                        llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  // If this function returns 'this', the last instruction is a CallInst
  // that returns 'this', and the 'this' argument of the CallInst points to
  // the same object as CXXThisValue, use the return value from the CallInst.
  // We will not need to keep 'this' alive through the callsite.  This also
  // enables optimizations in the backend, such as tail call optimization.
  if (CalleeWithThisReturn && CGM.getCXXABI().HasThisReturn(CurGD)) {
    llvm::BasicBlock *IP = Builder.GetInsertBlock();
    llvm::CallInst *Callsite;
    if (!IP->empty() && (Callsite = dyn_cast<llvm::CallInst>(&IP->back())) &&
        Callsite->getCalledFunction() == CalleeWithThisReturn &&
        checkThisPointer(Callsite->getOperand(0), CXXThisValue))
      RV = Builder.CreateBitCast(Callsite, RetAI.getCoerceToType());
  }
  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  args.add(convertTempToRValue(local, type), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
              cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of.  This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value.  No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV);

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return 0;
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary whose value is copied back
/// into the original l-value after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
      CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;
  llvm::BasicBlock *originBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                     llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = 0;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
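    // The copy happens only along the non-null path, so the copied value is
    // defined only when control arrives from copyBB; the edge from originBB
    // feeds undef into the phi.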
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateEvaluationKind(type) &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
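/// (All of the runtime-call helpers below set the calling convention to
/// getRuntimeCC(), so a target can use a dedicated convention for calls
/// into the runtime.)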
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                                ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      RValue EltRV = convertTempToRValue(EltAddr, EltTy);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);

    if (RD->isUnion()) {
      const FieldDecl *LargestFD = 0;
      CharUnits UnionSize = CharUnits::Zero();

      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        const FieldDecl *FD = *i;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        RValue FldRV = EmitRValueForField(LV, LargestFD);
        ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
      }
    } else {
      for (RecordDecl::field_iterator i = RD->field_begin(),
             e = RD->field_end(); i != e; ++i) {
        FieldDecl *FD = *i;

        RValue FldRV = EmitRValueForField(LV, FD);
        ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
      }
    }
  } else if (Ty->isAnyComplexType()) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
        cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
      Args.push_back(llvm::UndefValue::get(PaddingType));
      ++IRArgNo;
    }

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
        else
          EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
          IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        if (RV.isScalar()) {
          EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
        } else {
          EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
        }
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                      llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but
      // fast-isel and the optimizer generally like scalar values better
      // than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        llvm::Type *SrcTy =
          cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = 0;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
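  // (The "no exception safety" condition itself is checked inside
  // AddObjCARCExceptionMetadata, so the call here is unconditional under ARC.)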
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    return convertTempToRValue(Args[0], RetTy);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      switch (getEvaluationKind(RetTy)) {
      case TEK_Complex: {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      case TEK_Aggregate: {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }
      case TEK_Scalar: {
        // If the return value doesn't match, perform a bitcast to coerce it.
        // This can happen due to trivial type mismatches.
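        // (This mirrors the argument-side coercion above: the call's IR
        // result type may differ from RetIRTy by a trivial pointer bitcast.)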
        llvm::Value *V = CI;
        if (V->getType() != RetIRTy)
          V = Builder.CreateBitCast(V, RetIRTy);
        return RValue::get(V);
      }
      }
      llvm_unreachable("bad evaluation kind");
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                     llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    return convertTempToRValue(DestPtr, RetTy);
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}