// CGCall.cpp (Clang revision 194179)
//===---- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
14139761Skrion
1534678Sbde#include "CGCall.h"
1623546Swosch#include "CodeGenFunction.h"
1723546Swosch#include "CodeGenModule.h"
1823546Swosch#include "clang/Basic/TargetInfo.h"
1939161Sobrien#include "clang/AST/Decl.h"
2015903Swosch#include "clang/AST/DeclCXX.h"
2139161Sobrien#include "clang/AST/DeclObjC.h"
2215903Swosch#include "clang/Frontend/CompileOptions.h"
2315903Swosch#include "llvm/Attributes.h"
2415903Swosch#include "llvm/Support/CallSite.h"
2515903Swosch#include "llvm/Target/TargetData.h"
2615903Swosch
2715903Swosch#include "ABIInfo.h"
2815903Swosch
2932216Swoschusing namespace clang;
3032216Swoschusing namespace CodeGen;
3132216Swosch
3232216Swosch/***/
33218525Skeramida
34218525Skeramida// FIXME: Use iterator and sidestep silly type array creation.
3515903Swosch
3615903Swoschconst
3715903SwoschCGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
3815903Swosch  return getFunctionInfo(FTNP->getResultType(),
39119057Sobrien                         llvm::SmallVector<QualType, 16>());
4015903Swosch}
4115903Swosch
4215903Swoschconst
4315903SwoschCGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
4415903Swosch  llvm::SmallVector<QualType, 16> ArgTys;
4515903Swosch  // FIXME: Kill copy.
4615903Swosch  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
4765501Sobrien    ArgTys.push_back(FTP->getArgType(i));
4815903Swosch  return getFunctionInfo(FTP->getResultType(), ArgTys);
49186894Sbz}
5015903Swosch
51186894Sbzconst CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
5215903Swosch  llvm::SmallVector<QualType, 16> ArgTys;
5353033Sphantom  // Add the 'this' pointer unless this is a static method.
5415903Swosch  if (MD->isInstance())
5515903Swosch    ArgTys.push_back(MD->getThisType(Context));
5615903Swosch
5715903Swosch  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
5815903Swosch  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
5939161Sobrien    ArgTys.push_back(FTP->getArgType(i));
6015903Swosch  return getFunctionInfo(FTP->getResultType(), ArgTys);
6139161Sobrien}
6215903Swosch
6315903Swoschconst CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
6415903Swosch  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
6515903Swosch    if (MD->isInstance())
66223596Sse      return getFunctionInfo(MD);
67223596Sse
68223596Sse  const FunctionType *FTy = FD->getType()->getAsFunctionType();
69223596Sse  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
70223596Sse    return getFunctionInfo(FTP);
71223596Sse  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
72223596Sse}
73223596Sse
74223596Sseconst CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
7515903Swosch  llvm::SmallVector<QualType, 16> ArgTys;
7615903Swosch  ArgTys.push_back(MD->getSelfDecl()->getType());
7715903Swosch  ArgTys.push_back(Context.getObjCSelType());
7815903Swosch  // FIXME: Kill copy?
7915903Swosch  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
8015903Swosch         e = MD->param_end(); i != e; ++i)
8115903Swosch    ArgTys.push_back((*i)->getType());
8215903Swosch  return getFunctionInfo(MD->getResultType(), ArgTys);
8315903Swosch}
8415903Swosch
8515903Swoschconst CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
8615903Swosch                                                    const CallArgList &Args) {
8715903Swosch  // FIXME: Kill copy.
8815903Swosch  llvm::SmallVector<QualType, 16> ArgTys;
8915903Swosch  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
9015903Swosch       i != e; ++i)
9115903Swosch    ArgTys.push_back(i->second);
9215903Swosch  return getFunctionInfo(ResTy, ArgTys);
9315903Swosch}
9415903Swosch
9515903Swoschconst CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
9615903Swosch                                                  const FunctionArgList &Args) {
9715903Swosch  // FIXME: Kill copy.
9815903Swosch  llvm::SmallVector<QualType, 16> ArgTys;
9915903Swosch  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
10015903Swosch       i != e; ++i)
10115903Swosch    ArgTys.push_back(i->second);
10215903Swosch  return getFunctionInfo(ResTy, ArgTys);
10315903Swosch}
10415903Swosch
/// Core entry point: return the unique, cached CGFunctionInfo for the given
/// result type and argument types, creating (and running ABI lowering on) a
/// new one on a cache miss.
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;  // Cache hit: ABI info was already computed for this signature.

  // Construct the function info.
  // NOTE: the node is inserted into the FoldingSet before computeInfo runs;
  // InsertPos must be used before any other mutation of the set.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}
12514968Swosch
/// CGFunctionInfo stores the return type and all argument types in a single
/// heap array: slot 0 is the return type, argument i lives at index 1 + i.
CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  // One extra slot (index 0) for the return type.
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}
13465501Sobrien
135111853Sru/***/
13648204Sjmg
/// GetExpandedTypes - Recursively append the converted LLVM types of the
/// fields of the structure type \arg Ty to \arg ArgTys, flattening nested
/// aggregate fields. Only plain structures (no flexible array members, no
/// bit-fields) can be expanded; violations assert.
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Aggregate fields are flattened recursively into the same list.
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}
159223596Sse
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty at the
/// lvalue \arg LV from a run of flattened LLVM function arguments starting
/// at \arg AI (the inverse of GetExpandedTypes). Returns the iterator one
/// past the last argument consumed.
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Nested aggregate: recurse, consuming as many arguments as it needs.
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      // Scalar field: one LLVM argument maps to one store.
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}
187245752Sbrooks
/// ExpandTypeToArgs - Flatten the aggregate rvalue \arg RV of structure type
/// \arg Ty into a sequence of scalar values appended to \arg Args, one per
/// (recursively expanded) field.
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Nested aggregate: recurse on the field's own address.
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
214156813Sru
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    // (Copy the source value into a temporary of the destination type, then
    // load the whole temporary; bits beyond SrcSize are undefined.)
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}
256183242Ssam
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    // (Spill Src into a temporary of its own type, then reload it as DstTy;
    // the high bits of Src beyond DstSize are dropped.)

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when have access
    // to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}
298183242Ssam
299157115Sru/***/
300156813Sru
301156813Srubool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
302156813Sru  return FI.getReturnInfo().isIndirect();
303183242Ssam}
304156813Sru
/// GetFunctionType - Build the LLVM function type for \arg FI, applying the
/// per-argument ABI classification (direct, extend, indirect/byval, coerce,
/// ignore, expand) computed by the target ABIInfo.
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    // Indirect return: the function returns void and takes a leading hidden
    // pointer (sret) in the return type's address space.
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    // Return the value as the ABI-chosen coercion type instead of the
    // natural conversion of RetTy.
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      // No LLVM parameter at all for this argument.
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      // One LLVM parameter per flattened struct field.
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
371237612Sobrien
/// ConstructAttributeList - Build the list of LLVM attributes for a function
/// with info \arg FI (and optional source declaration \arg TargetDecl) into
/// \arg PAL. Attribute index 0 is the return value; parameters start at 1,
/// and Index is advanced in lock-step with the emitted LLVM parameters.
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  if (CompileOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CompileOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   // Sign/zero-extend small integer returns per the signedness of RetTy.
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    // The hidden sret parameter occupies attribute slot 'Index'.
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLS THROUGH
    case ABIArgInfo::Direct:
      // Consume regparm budget in pointer-width units; attribute applies only
      // while the (signed) budget stays non-negative after the debit.
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      // Advance Index past all expanded parameters; none get attributes.
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
498156813Sru
/// EmitFunctionProlog - Bind the incoming LLVM arguments of \arg Fn to the
/// source-level parameter declarations in \arg Args, undoing the ABI
/// transformations (indirect, coerce, expand, ignore) recorded in \arg FI.
/// AI walks Fn's LLVM arguments; cases that consume a non-standard number of
/// LLVM arguments use 'continue' to bypass the shared '++AI' at loop end.
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;  // AI already advanced past all expanded arguments.
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}
618240966Sbrooks
/// EmitFunctionEpilog - Emit the function return sequence: move the value in
/// the return-value slot \arg ReturnValue (if any) into the ABI-mandated
/// location (direct register value, hidden sret pointer, or coerced type)
/// and emit the ret instruction.
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      // Copy the result into the hidden sret argument (CurFn's first arg).
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  // RV is null for void / indirect / ignored returns.
  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}
666
667RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
668  if (ArgType->isReferenceType())
669    return EmitReferenceBindingToExpr(E, ArgType);
670
671  return EmitAnyExprToTemp(E);
672}
673
/// EmitCall - Emit a call to \arg Callee, lowering the already-evaluated
/// arguments in \arg CallArgs and the return value according to the ABI
/// classification recorded in \arg CallInfo. \arg TargetDecl, when non-null,
/// is used while constructing the call's attribute list. Returns the call
/// result translated back into an RValue of the source-level return type.
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // The flat list of IR-level arguments being built up for the call.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  // Walk the ABI classification and the evaluated arguments in lockstep,
  // translating each RValue into the IR value(s) its ABI kind requires.
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      // Pass by pointer: scalars and complex values are first spilled to a
      // temporary; aggregates already have an address to pass.
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // Pass as a first-class IR value. A complex value is rebuilt as a
      // two-element value via insertvalue; an in-memory aggregate is loaded.
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      // The argument contributes nothing to the IR argument list.
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      // Round-trip through memory: store the value, then reload it with the
      // ABI-mandated coercion type.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      // Flatten one source-level argument into multiple IR arguments.
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      // Only strip when the cast changes nothing callers can observe: same
      // return type, same parameter count, and pairwise-identical params.
      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }


  // Build the attribute list for the call from the ABI info and the target
  // declaration (if any).
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  // Emit an invoke rather than a call when an invoke destination is active
  // and the callee is not known to be nounwind.
  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    // Resume emission on the normal-return edge of the invoke.
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  // If we can see through any pointer casts to the underlying function, make
  // the call site use that function's calling convention.
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  // Give the result a readable name unless it is void or naming is disabled.
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  // Translate the IR-level result back into an RValue of the source return
  // type, inverting the return classification applied above.
  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    // The result was written through the sret slot pushed first (Args[0]);
    // hand it back in the form the source type requires.
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      // Split the two-element result back into (real, imag).
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // The aggregate came back as a first-class value; spill it so the
      // RValue can carry an address.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    // Store the coerced result to a temporary, then reload it with the
    // source-level return type.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
868
869/* VarArg handling */
870
871llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
872  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
873}
874