CGCall.cpp revision 194711
//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CompileOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use an iterator and sidestep silly type array creation.

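/// getFunctionInfo - Get the CGFunctionInfo for an unprototyped function type;
/// only the result type is used, since the argument types are unknown.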
const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

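/// getFunctionInfo - Get the CGFunctionInfo for a C++ method; for instance
/// methods the implicit 'this' pointer is prepended to the argument list.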
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

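/// getFunctionInfo - Get the CGFunctionInfo for a function declaration,
/// dispatching to the C++ method overload for instance methods.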
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

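/// getFunctionInfo - Get the CGFunctionInfo for an Objective-C method; the
/// implicit 'self' and '_cmd' arguments are prepended to the parameter list.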
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                  const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

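/// getFunctionInfo - Get or create the uniqued CGFunctionInfo for the given
/// result and argument types, computing the ABI information on first use.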
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

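/// GetExpandedTypes - Append the LLVM types for each scalar field of the given
/// structure type to ArgTys, recursing into nested aggregate fields.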
void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

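/// ExpandTypeFromArgs - Reconstruct a structure of type Ty at the lvalue LV
/// from a sequence of expanded function arguments, returning the iterator past
/// the last argument consumed.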
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

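/// ExpandTypeToArgs - Flatten the aggregate rvalue RV of type Ty into a
/// sequence of scalar call arguments, appending them to Args.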
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  // If the load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.
    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
    llvm::StoreInst *Store =
      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
    // FIXME: Use better alignment / avoid requiring aligned store.
    Store->setAlignment(1);
    return CGF.Builder.CreateLoad(Tmp);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If the store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}

/***/

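/// ReturnTypeUsesSret - Return true if the function described by FI returns
/// its result indirectly, through a hidden sret pointer argument.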
bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

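/// GetFunctionType - Build the LLVM function type corresponding to the given
/// CGFunctionInfo, mapping each ABI argument kind onto IR parameter types.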
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

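/// ConstructAttributeList - Compute the LLVM attributes for the function, its
/// return value, and its parameters, based on the ABI info and any attributes
/// present on the target declaration.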
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>(getContext()))
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>(getContext()))
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>(getContext()))
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>(getContext()))
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  if (CompileOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CompileOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->isSignedIntegerType()) {
      RetAttrs |= llvm::Attribute::SExt;
    } else if (RetTy->isUnsignedIntegerType()) {
      RetAttrs |= llvm::Attribute::ZExt;
    }
    // FALLTHROUGH
  case ABIArgInfo::Direct:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variable.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr
          = TargetDecl->getAttr<RegparmAttr>(getContext()))
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerType()) {
        Attributes |= llvm::Attribute::SExt;
      } else if (ParamType->isUnsignedIntegerType()) {
        Attributes |= llvm::Attribute::ZExt;
      }
      // FALLS THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

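/// EmitFunctionProlog - Emit the function prologue: name the incoming LLVM
/// arguments and materialize each parameter declaration according to its ABI
/// argument kind.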
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, 0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

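/// EmitFunctionEpilog - Emit the function epilogue: load the return value from
/// its temporary (or store it through the sret pointer) and emit the return
/// instruction.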
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

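/// EmitCallArg - Emit an rvalue for the given call argument expression,
/// emitting reference-typed arguments as a reference binding.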
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}

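/// EmitCall - Emit a call or invoke of the given callee with the given
/// arguments, lowering each argument and the return value according to the
/// ABI information in CallInfo.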
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT =
        cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size()) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}