TargetInfo.cpp revision 210299
//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

ABIInfo::~ABIInfo() {}

void ABIArgInfo::dump() const {
  llvm::raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Coerce:
    OS << "Coerce Type=";
    getCoerceToType()->print(OS);
    break;
  case Indirect:
59    OS << "Indirect Align=" << getIndirectAlign()
60       << " Byal=" << getIndirectByVal();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it is an
/// unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      FT = AT->getElementType();

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}
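
// For illustration: a record whose only members are unnamed bit-fields or
// arrays of such records counts as empty here. E.g., with the GNU extension
// that allows such structs in C:
//
//   struct Empty { int : 0; };
//   struct Wrapped { struct Empty e[2]; };
//
// isEmptyRecord() returns true for both when AllowArrays is set, while any
// named scalar field would make the record non-empty.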

/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;

  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}

/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
/// constructor.
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  return hasNonTrivialDestructorOrCopyConstructor(RT);
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
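
// For illustration: isSingleElementStruct() looks through empty fields and
// single-element arrays, so for
//
//   struct S { int : 0; double d; };   // unnamed bit-field is ignored
//   struct T { double d[1]; };         // one-element array is the element
//
// it returns the 'double' type in both cases, while a struct with two
// non-empty fields yields a null result.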

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}
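
// For illustration: 'struct { int x; long long y; }' contains only 32- and
// 64-bit basic types and no bit-fields, so it can be expanded into separate
// arguments; adding a 'short' member (or any bit-field) makes the predicate
// fail, and the struct is passed indirectly instead.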

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo() : TargetCodeGenInfo(new DefaultABIInfo()) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context,
                               bool ByVal = true) const;

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
      IsSmallStructInRegABI(p) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
    : TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}
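
// For illustration: 'struct { short a, b; }' (32 bits) and
// 'struct { int a, b; }' (64 bits) both satisfy this predicate, since they
// are register sized and every field is itself returnable in a register;
// 'struct { char c[3]; }' (24 bits) fails the isRegisterSize() check and is
// returned in memory.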

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(VMContext), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(
            llvm::IntegerType::get(VMContext, (unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
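
// For illustration: with IsSmallStructInRegABI set, 'struct { float f; }' is
// a single element struct and comes back as a 'float' coercion,
// 'struct { short a, b; }' is register sized and is coerced to i32, and a
// 12-byte struct fails shouldReturnTypeInRegister() and is returned
// indirectly (sret).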

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
                                            ASTContext &Context,
                                            bool ByVal) const {
  if (!ByVal)
    return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment. We trust the back-end to honor the
  // minimum ABI alignment for byval, to make cleaner IR.
  const unsigned MinABIAlign = 4;
  unsigned Align = Context.getTypeAlign(Ty) / 8;
  if (Align > MinABIAlign)
    return ABIArgInfo::getIndirect(Align);
  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context,
                                           llvm::LLVMContext &VMContext) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, Context, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, Context);
    }

    // Ignore empty structs.
    if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (Context.getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, Context))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty, Context);
  } else {
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
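
// For illustration, the lowering above produces IR roughly like the
// following for 'va_arg(ap, int)' (value names approximate):
//
//   %ap.cur  = load i8** %ap
//   %result  = bitcast i8* %ap.cur to i32*
//   %ap.next = getelementptr i8* %ap.cur, i32 4
//   store i8* %ap.next, i8** %ap
//
// i.e. each argument slot is rounded up to a multiple of 4 bytes.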

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-7 are the eight integer registers;  the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4).  Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5).  Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//


namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  ASTContext &Context;
  const llvm::TargetData &TD;

  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object.  Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, choose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  llvm::LLVMContext &VMContext,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  const llvm::Type *PrefType) const;

public:
  X86_64ABIInfo(ASTContext &Ctx, const llvm::TargetData &td)
    : Context(Ctx), TD(td) {}

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(ASTContext &Ctx, const llvm::TargetData &TD)
    : TargetCodeGenInfo(new X86_64ABIInfo(Ctx, TD)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);

    return false;
  }
};

}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
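
// For illustration: classifying 'struct { int a; float b; }' visits both
// fields in the same eightbyte; merge(NoClass, Integer) gives Integer for
// the 'int', and merge(Integer, SSE) stays Integer by rule (d), so the pair
// travels together in one general purpose register.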

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (hasNonTrivialDestructorOrCopyConstructor(RT))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }

      // If this record has no fields but isn't empty, classify as INTEGER.
      if (RD->field_empty() && Size)
        Current = Integer;
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
1099    // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
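
// For illustration: classify() on 'struct { double d; long l; }' (16 bytes)
// sees 'd' at offset 0 and 'l' at offset 64, producing Lo = SSE and
// Hi = Integer, so the struct is passed in one XMM register plus one GPR;
// a misaligned field or a size over 16 bytes forces class MEMORY instead.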

ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
                                          const llvm::Type *CoerceTo) const {
  if (CoerceTo->isIntegerTy(64) || isa<llvm::PointerType>(CoerceTo)) {
    // Integer and pointer types will end up in a general purpose
    // register.

    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (Ty->isIntegralOrEnumerationType() || Ty->hasPointerRepresentation())
      return (Ty->isPromotableIntegerType() ?
              ABIArgInfo::getExtend() : ABIArgInfo::getDirect());

    // If this is an 8/16/32-bit structure that is passed as an int64, then it
    // will be passed in the low 8/16/32-bits of a 64-bit GPR, which is the
    // same as how an i8/i16/i32 is passed.  Coerce to an i8/i16/i32 instead
    // of an i64.
    switch (Context.getTypeSizeInChars(Ty).getQuantity()) {
    default: break;
    case 1: CoerceTo = llvm::Type::getInt8Ty(CoerceTo->getContext()); break;
    case 2: CoerceTo = llvm::Type::getInt16Ty(CoerceTo->getContext()); break;
    case 4: CoerceTo = llvm::Type::getInt32Ty(CoerceTo->getContext()); break;
    }

  } else if (CoerceTo->isDoubleTy()) {
    assert(Ty.isCanonical() && "should always have a canonical type here");
    assert(!Ty.hasQualifiers() && "should never have a qualified type here");

    // Float and double end up in a single SSE reg.
    if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
      return ABIArgInfo::getDirect();

    // If this is a 32-bit structure that is passed as a double, then it will be
    // passed in the low 32-bits of the XMM register, which is the same as how a
    // float is passed.  Coerce to a float instead of a double.
    if (Context.getTypeSizeInChars(Ty).getQuantity() == 4)
      CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext());
  }

  return ABIArgInfo::getCoerce(CoerceTo);
}
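
// For illustration: a 4-byte 'struct { short a, b; }' classified as INTEGER
// arrives here with an i64 coercion type and is narrowed to i32 by the size
// switch above; likewise 'struct { float f; }' classified as SSE is narrowed
// from double to float, so only the low 32 bits of the register are used.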

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Compute the byval alignment. We trust the back-end to honor the
  // minimum ABI alignment for byval, to make cleaner IR.
  const unsigned MinABIAlign = 8;
  unsigned Align = Context.getTypeAlign(Ty) / 8;
  if (Align > MinABIAlign)
    return ABIArgInfo::getIndirect(Align);
  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::getInt64Ty(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::getDoubleTy(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(VMContext,
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    NULL);
    break;
  }

  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
1269    // SSEUP should always be preceeded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(VMContext, ResType,
                                      llvm::Type::getDoubleTy(VMContext), NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType);
}

static const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType,
                                              unsigned Offset,
                                              const llvm::TargetData &TD) {
  if (PrefType == 0) return 0;

  // Pointers are always 8-bytes at offset 0.
  if (Offset == 0 && isa<llvm::PointerType>(PrefType))
    return PrefType;

  // TODO: 1/2/4/8 byte integers are also interesting, but we have to know that
  // the "hole" is not used in the containing struct (just undef padding).
  const llvm::StructType *STy = dyn_cast<llvm::StructType>(PrefType);
  if (STy == 0) return 0;

  // If this is a struct, recurse into the field at the specified offset.
  const llvm::StructLayout *SL = TD.getStructLayout(STy);
  if (Offset >= SL->getSizeInBytes()) return 0;

  unsigned FieldIdx = SL->getElementContainingOffset(Offset);
  Offset -= SL->getElementOffset(FieldIdx);

  return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), Offset, TD);
}
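
// For illustration: if the preferred IR type for an argument is
// '{ i8*, i32 }', the walk above finds the i8* field at offset 0 and returns
// it, so the low eightbyte is passed as a pointer rather than a generic i64;
// at offset 8 the i32 field is rejected (only pointers at offset 0 are
// accepted so far) and the caller keeps the safe i64 default.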

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
                                               llvm::LLVMContext &VMContext,
                                               unsigned &neededInt,
                                               unsigned &neededSSE,
                                               const llvm::Type *PrefType) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    // It is always safe to classify this as an i64 argument.
    ResType = llvm::Type::getInt64Ty(VMContext);
    ++neededInt;

    // If we can choose a better 8-byte type based on the preferred type, and if
    // that type is still passed in a GPR, use it.
    if (const llvm::Type *PrefTypeLo = Get8ByteTypeAtOffset(PrefType, 0, TD))
      if (isa<llvm::IntegerType>(PrefTypeLo) ||
          isa<llvm::PointerType>(PrefTypeLo))
        ResType = PrefTypeLo;
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::getDoubleTy(VMContext);
    break;
  }

  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;

  case Integer: {
    // It is always safe to classify this as an i64 argument.
    const llvm::Type *HiType = llvm::Type::getInt64Ty(VMContext);
    ++neededInt;

    // If we can choose a better 8-byte type based on the preferred type, and if
    // that type is still passed in a GPR, use it.
    if (const llvm::Type *PrefTypeHi = Get8ByteTypeAtOffset(PrefType, 8, TD))
      if (isa<llvm::IntegerType>(PrefTypeHi) ||
          isa<llvm::PointerType>(PrefTypeHi))
        HiType = PrefTypeHi;

    ResType = llvm::StructType::get(VMContext, ResType, HiType, NULL);
    break;
  }

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;
  }

  return getCoerceResult(Ty, ResType);
}
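
// For illustration: for 'struct { void *p; long n; }' both eightbytes
// classify as INTEGER, so the result is a coercion to '{ i64, i64 }' (or
// '{ i8*, i64 }' with a preferred type) with neededInt == 2 and
// neededSSE == 0; computeInfo() below checks those counts against the free
// registers before committing to register passing.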

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                                llvm::LLVMContext &VMContext,
                                const llvm::Type *const *PrefTypes,
                                unsigned NumPrefTypes) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), VMContext);

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    // If the client specified a preferred IR type to use, pass it down to
    // classifyArgumentType.
    const llvm::Type *PrefType = 0;
    if (NumPrefTypes) {
      PrefType = *PrefTypes++;
      --NumPrefTypes;
    }

    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, VMContext,
                                    neededInt, neededSSE, PrefType);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type);
    }
  }
}
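
// Illustrative note on the revert rule above (hypothetical scenario): if only
// one integer register remains free and the next argument needs two GPR
// eightbytes (neededInt == 2), the whole argument is demoted to
// getIndirectResult rather than being split between a register and the stack.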

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
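
// A minimal C sketch of what EmitVAArgFromMemory emits (illustrative only;
// 'l' is the hypothetical va_list record, T the requested type):
//   char *p = l->overflow_arg_area;
//   if (alignof(T) > 8)
//     p = (char *)(((uintptr_t)p + 15) & ~(uintptr_t)15);
//   T *res = (T *)p;
//   l->overflow_arg_area = p + ((sizeof(T) + 7) & ~7);
//   return res;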

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();

  // Assume that the va_list type is correct; it should be a pointer to the
  // LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, VMContext, neededInt, neededSSE, 0);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPointTy() ^ TyHi->isFloatingPointTy()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, so we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
    const llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
                                                       DoubleTy, NULL);
    llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}
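
// For illustration (hypothetical source, not part of this file): for
//   struct S { long n; double d; };
//   struct S s = va_arg(ap, struct S);
// neededInt == 1 and neededSSE == 1, so the mixed-register path above loads
// one eightbyte from reg_save_area + gp_offset and one from
// reg_save_area + fp_offset, reassembles them in a temporary, and the final
// phi selects between that temporary and the overflow-area address.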


//===----------------------------------------------------------------------===//
// PIC16 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {}
};

}

ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  return ABIArgInfo::getDirect();
}

llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;

  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
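
// A minimal C sketch of the bump-pointer va_arg above (illustrative only):
//   char *cur = *(char **)ap;
//   *(char **)ap = cur + sizeof(T);
//   return (T *)cur;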


// PowerPC-32

namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}
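
// Conceptually, the initialization above is equivalent to this C sketch
// (illustrative only; 'Size' stands for the table behind Address):
//   for (int i = 0;  i <= 31;  ++i) Size[i] = 4;   // r0-r31
//   for (int i = 32; i <= 63;  ++i) Size[i] = 8;   // fp0-fp31
//   for (int i = 77; i <= 108; ++i) Size[i] = 16;  // v0-v31
// with the special-purpose entries (64-76, 109-113) set to 4.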


//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
    :TargetCodeGenInfo(new ARMABIInfo(K)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 13; // r13 is the stack pointer
  }
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                             llvm::LLVMContext &VMContext,
                             const llvm::Type *const *PrefTypes,
                             unsigned NumPrefTypes) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                          VMContext);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  const llvm::Triple &Triple(Context.Target.getTriple());
  llvm::CallingConv::ID DefaultCC;
  if (Triple.getEnvironmentName() == "gnueabi" ||
      Triple.getEnvironmentName() == "eabi")
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
  else
    DefaultCC = llvm::CallingConv::ARM_APCS;

  switch (getABIKind()) {
  case APCS:
    if (DefaultCC != llvm::CallingConv::ARM_APCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;

  case AAPCS:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;

  case AAPCS_VFP:
    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}
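
// For example (illustrative): on a triple such as arm-unknown-linux-gnueabi
// the environment name is "gnueabi", so DefaultCC is ARM_AAPCS; an AAPCS
// ABIKind then leaves the convention at the default, while an APCS ABIKind
// overrides it explicitly.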

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(Context, Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // FIXME: This is kind of nasty... but there isn't much choice because the ARM
  // backend doesn't support byval.
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (Context.getTypeAlign(Ty) > 32) {
    ElemTy = llvm::Type::getInt64Ty(VMContext);
    SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
  } else {
    ElemTy = llvm::Type::getInt32Ty(VMContext);
    SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
  }
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
  const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
  return ABIArgInfo::getCoerce(STy);
}
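
// For illustration (hypothetical C type, not part of this file): a 12-byte,
// 4-byte-aligned struct such as
//   struct S { int a, b, c; };
// yields SizeRegs == 3 and is coerced to the packed struct { [3 x i32] }.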

static bool isIntegerLikeType(QualType Ty,
                              ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, so we only need to verify they are
    // "integer like". We still have to disallow a subsequent non-bitfield,
    // for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
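
// Illustrative examples (hypothetical C types) of the rules above:
//   struct { int x; }            // integer-like: one word, field at offset 0
//   union { int a; char b; }     // integer-like: every member is at offset 0
//   struct { short a; short b; } // not integer-like: b is at a nonzero offset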

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (!CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(Context, RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(
                                     VMContext, Context.getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, Context, VMContext)) {
      // Return in the smallest viable integer type.
      uint64_t Size = Context.getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
      if (Size <= 16)
        return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
      return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(Context, RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = Context.getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
    if (Size <= 16)
      return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
    return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
  }

  return ABIArgInfo::getIndirect(0);
}
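
// For illustration (hypothetical C types) under the AAPCS path above:
//   struct { char c; }     // 8 bits  -> coerced to i8, returned in r0
//   struct { short a, b; } // 32 bits -> coerced to i32, returned in r0
//   struct { int a, b; }   // 64 bits -> returned indirectly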

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
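
// Note, as an illustration: unlike the PIC16 version, the pointer bump here
// is rounded up to a 4-byte multiple, so even a 1-byte argument advances
// "ap" by 4 bytes (RoundUpToAlignment(1, 4) == 4).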

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public ABIInfo {
  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                            Context, VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {}
};

}

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // The SystemZ ABI requires all 8-, 16- and 32-bit quantities to be extended.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}
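
// For example (illustrative): under this rule an 'int' parameter or return
// value gets the Extend attribute (widened to a full register), while a
// 'long' is already register-sized and is classified Direct.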

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}

ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return (isPromotableIntegerType(RetTy) ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context,
                                          llvm::LLVMContext &VMContext) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return (isPromotableIntegerType(Ty) ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle the 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set the ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Mark the handler as 'noinline'.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit the ISR vector alias.
      unsigned Num = attr->getNumber() + 0xffe0;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "vector_" +
                            llvm::LowercaseString(llvm::utohexstr(Num)),
                            GV, &M.getModule());
    }
  }
}
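
// For illustration (hypothetical source): a handler declared as
//   void __attribute__((interrupt(2))) isr(void);
// gets the MSP430_INTR calling convention and an alias named "vector_ffe2"
// (2 + 0xffe0 == 0xffe2) pointing at the function.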

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MIPSTargetCodeGenInfo(): TargetCodeGenInfo(new DefaultABIInfo()) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 29; // $29 is the stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(Builder, Address, Four8, 80, 181);

  return false;
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
  // free it.

  const llvm::Triple &Triple = getContext().Target.getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo());

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo());

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    // FIXME: We want to know the float calling convention as well.
    if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
      return *(TheTargetCodeGenInfo =
               new ARMTargetCodeGenInfo(ARMABIInfo::APCS));

    return *(TheTargetCodeGenInfo =
             new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));

  case llvm::Triple::pic16:
    return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo());

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());

  case llvm::Triple::x86:
    switch (Triple.getOS()) {
    case llvm::Triple::Darwin:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Context, true, true));
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::MinGW64:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Context, false, true));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Context, false, false));
    }

  case llvm::Triple::x86_64:
    return *(TheTargetCodeGenInfo =
               new X86_64TargetCodeGenInfo(Context, TheTargetData));
  }
}
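
// For illustration (hypothetical triples): "i386-apple-darwin10" selects
// X86_32TargetCodeGenInfo with the Darwin flags (true, true), while
// "armv7-unknown-linux-gnueabi" selects the AAPCS-flavored
// ARMTargetCodeGenInfo unless the target ABI string is "apcs-gnu".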