CGCall.cpp revision 195341
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CompileOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                  const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

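// GetExpandedTypes - Recursively append the converted LLVM type of every
// field of the structure type Ty to ArgTys, expanding nested aggregate
// fields in place. Used to lower ABIArgInfo::Expand arguments.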
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

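// ExpandTypeFromArgs - Reassemble a struct parameter that was expanded into
// multiple LLVM arguments: consecutive arguments starting at AI are stored
// into the corresponding fields of the lvalue LV. Returns the iterator past
// the last argument consumed.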
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

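// ExpandTypeToArgs - The inverse of ExpandTypeFromArgs: flatten the aggregate
// rvalue RV field by field, pushing each scalar field value onto Args and
// recursing into nested aggregates.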
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}

/***/

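// ReturnTypeUsesSret - Return true if the function returns its result
// indirectly through a hidden sret pointer argument.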
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

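// GetFunctionType - Construct the LLVM IR function type for FI: Direct and
// Extend values use their converted type, Indirect values are passed through
// pointers (with an extra leading sret pointer for indirect returns), Ignore
// contributes nothing, Coerce substitutes the coercion type, and Expand
// flattens a struct argument into its fields.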
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

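// ConstructAttributeList - Translate the declaration attributes, compile
// options, and ABI classification in FI into LLVM attributes. Attribute
// index 0 holds the return-value attributes, parameters start at index 1,
// and ~0 holds the function-level attributes.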
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  if (CompileOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CompileOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  if (Features.getStackProtectorMode() == LangOptions::SSPOn)
    FuncAttrs |= llvm::Attribute::StackProtect;
  else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
    FuncAttrs |= llvm::Attribute::StackProtectReq;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
   if (RetTy->isSignedIntegerType()) {
     RetAttrs |= llvm::Attribute::SExt;
   } else if (RetTy->isUnsignedIntegerType()) {
     RetAttrs |= llvm::Attribute::ZExt;
   }
   // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr
          = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

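  // RegParm tracks how many argument registers remain available (from the
  // regparm attribute). Each integer or pointer parameter below consumes one
  // slot per pointer-width chunk of its size and is marked 'inreg' only while
  // the count stays non-negative.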
  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
     if (ParamType->isSignedIntegerType()) {
       Attributes |= llvm::Attribute::SExt;
     } else if (ParamType->isUnsignedIntegerType()) {
       Attributes |= llvm::Attribute::ZExt;
     }
     // FALLS THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

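// EmitFunctionProlog - Emit the function prologue: name the sret argument if
// present, then create the local declaration for each formal parameter
// according to its ABI classification (loading indirect scalars, spilling
// direct aggregates to a temporary, reconstructing expanded structs, and
// coercing mismatched types through memory).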
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

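// EmitFunctionEpilog - Emit the function epilogue: copy the value held in the
// ReturnValue slot to the right place (through the sret pointer for indirect
// returns, via a coerced load for Coerce, or a plain load for Direct/Extend)
// and emit the ret instruction.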
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

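// EmitCallArg - Evaluate a single call argument: bind reference parameters
// directly to the argument expression, and materialize everything else into
// a temporary rvalue.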
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}

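// EmitCall - Lower a call with the given ABI information: build the IR
// argument list from CallArgs according to each ABIArgInfo, emit a call or
// invoke to Callee, attach the computed attribute list and calling
// convention, and convert the IR result back into an RValue for the caller.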
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();


  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }


  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // Even though the ABI ignores this return value, make sure to
    // construct an appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

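// EmitVAArg - Emit a va_arg expression by delegating to the target-specific
// ABIInfo implementation.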
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}