CGCall.cpp revision 193576
//===---- CGCall.cpp - Encapsulate calling convention details ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CompileOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

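// Each of the getFunctionInfo overloads below collects a result type and a
// list of argument types for a particular kind of declaration or function
// type, and then funnels them into the ArgTys-based getFunctionInfo at the
// end of the group, which caches the computed CGFunctionInfo.
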
const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                  const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

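/// getFunctionInfo - Look up or create the unique CGFunctionInfo for the
/// given result and argument types. Previously computed entries are found
/// via the FunctionInfos folding set; a newly created entry has its ABI
/// information computed before being returned.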
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

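/// GetExpandedTypes - Recursively flatten a structure type into the LLVM
/// types of its scalar fields, appending them to \arg ArgTys. This is the
/// type-level counterpart of the ABIArgInfo::Expand argument passing
/// convention.
///
/// As an illustrative (target-dependent) example, on a typical target where
/// 'int' lowers to i32, a type such as
///   struct S { int a; float b; };
/// would be expanded to the two argument types { i32, float }.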
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

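/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty at the
/// lvalue \arg LV from a sequence of expanded LLVM function arguments
/// starting at \arg AI. Returns the iterator past the last argument
/// consumed.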
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

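/// ExpandTypeToArgs - Flatten the aggregate rvalue \arg RV of type \arg Ty
/// into individual scalar call arguments, appending them to \arg Args. This
/// is the call-site counterpart of ExpandTypeFromArgs.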
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

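// The two helpers below implement the ABIArgInfo::Coerce convention, where a
// value of one type is passed or returned in the bits of another type chosen
// by the ABI. As an illustrative (target-dependent) example, an ABI might
// coerce a small aggregate such as
//   struct S { int a, b; };
// into a single i64. The helpers move the bits between the two
// representations, bouncing through a temporary alloca whenever a plain
// bitcast of the pointer could read or write past the smaller object.
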
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}

/***/

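/// ReturnTypeUsesSret - Return true if the function described by \arg FI
/// returns its result indirectly, via a hidden sret pointer argument.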
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

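/// GetFunctionType - Build the LLVM IR function type for the ABI-lowered
/// signature in \arg FI. The return type is mapped according to its
/// ABIArgInfo (possibly becoming void plus a leading sret pointer argument),
/// and each argument is mapped according to its own ABIArgInfo kind.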
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

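/// ConstructAttributeList - Compute the LLVM attributes implied by \arg FI
/// and the optional \arg TargetDecl (sext/zext on promotable integers,
/// sret+noalias on indirect returns, byval with alignment on indirect
/// arguments, inreg for regparm, plus function-level attributes such as
/// noreturn and nounwind), and append them to \arg PAL.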
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  if (CompileOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CompileOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Direct:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Direct:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

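/// EmitFunctionProlog - Emit the code that binds the incoming LLVM function
/// arguments of \arg Fn to the semantic parameter declarations in \arg Args,
/// undoing whatever lowering (indirect, coerced, expanded, or ignored
/// arguments) was applied to the signature, and naming the hidden sret
/// argument if there is one.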
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, 0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

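/// EmitFunctionEpilog - Emit the return sequence for the current function:
/// load the result from the \arg ReturnValue temporary (if any) and return
/// it according to the return ABIArgInfo; indirect results are stored
/// through the hidden sret argument and the function returns void.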
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

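/// EmitCallArg - Emit a single call argument: reference parameters are bound
/// directly to the referenced lvalue, everything else is evaluated into a
/// temporary as needed.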
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}

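/// EmitCall - Emit a call to \arg Callee with the ABI-lowered signature
/// \arg CallInfo, passing \arg CallArgs. Each argument is lowered according
/// to its ABIArgInfo, the attribute list is constructed, a call (or invoke,
/// when there is an active invoke destination and the callee may unwind) is
/// emitted, and the returned value is converted back into an RValue of the
/// source-level return type.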
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // Even though the callee's result is being ignored, construct an
    // appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

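/// EmitVAArg - Emit a va_arg of type \arg Ty from the va_list pointed to by
/// \arg VAListAddr; the target-specific lowering is delegated to the ABIInfo.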
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}