CGCall.cpp revision 193326
//===----- CGCall.cpp - Encapsulate calling convention details --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetData.h"

#include "ABIInfo.h"

using namespace clang;
using namespace CodeGen;

/***/

// FIXME: Use iterator and sidestep silly type array creation.

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
  return getFunctionInfo(FTNP->getResultType(),
                         llvm::SmallVector<QualType, 16>());
}

const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(MD->getThisType(Context));

  const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  return getFunctionInfo(FTP->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  const FunctionType *FTy = FD->getType()->getAsFunctionType();
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
    return getFunctionInfo(FTP);
  return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<QualType, 16> ArgTys;
  ArgTys.push_back(MD->getSelfDecl()->getType());
  ArgTys.push_back(Context.getObjCSelType());
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i)
    ArgTys.push_back((*i)->getType());
  return getFunctionInfo(MD->getResultType(), ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args) {
  // FIXME: Kill copy.
  llvm::SmallVector<QualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(i->second);
  return getFunctionInfo(ResTy, ArgTys);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(ResTy, ArgTys);
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI, getContext());

  return *FI;
}

/***/

ABIInfo::~ABIInfo() {}

void ABIArgInfo::dump() const {
  fprintf(stderr, "(ABIArgInfo Kind=");
  switch (TheKind) {
  case Direct:
    fprintf(stderr, "Direct");
    break;
  case Ignore:
    fprintf(stderr, "Ignore");
    break;
  case Coerce:
    fprintf(stderr, "Coerce Type=");
    getCoerceToType()->print(llvm::errs());
    break;
  case Indirect:
    fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
    break;
  case Expand:
    fprintf(stderr, "Expand");
    break;
  }
  fprintf(stderr, ")\n");
}

/***/

static bool isEmptyRecord(ASTContext &Context, QualType T);

/// isEmptyField - Return true iff the field is "empty", that is, it is
/// an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();
  // Constant arrays of empty records count as empty, strip them off.
  while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
    FT = AT->getElementType();

  return isEmptyRecord(Context, FT);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T) {
  const RecordType *RT = T->getAsRecordType();
  if (!RT)
    return 0;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i)
    if (!isEmptyField(Context, *i))
      return false;
  return true;
}

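// For illustration (not part of the original source): under these
// predicates, "struct Empty {};" is an empty record, an unnamed
// bit-field such as "int : 0" is an empty field, and a field of type
// "struct Empty[4]" is empty as well, since constant arrays of empty
// records are looked through.
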
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
                                           ASTContext &Context) {
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;
  }

  return true;
}

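// For illustration (not part of the original source): given
//   struct A { struct { double d; } inner; };  // single element struct
//   struct B { int x; float y; };              // all 32/64-bit basic fields
//   struct C { char c; };                      // char is 8 bits: fails both
// isSingleElementStruct(A) yields the double type, and
// areAllFields32Or64BitBasicType holds for B but not for C.
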
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  bool IsDarwin;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(ASTContext &Context, bool d)
    : ABIInfo(), Context(Context), IsDarwin(d) {}
};
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, or complex type, it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAsRecordType();
  if (!RT) return false;

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
         e = RT->getDecl()->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}

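// For illustration (not part of the original source): struct { short a, b; }
// is 32 bits and every field is itself register-returnable, so it
// satisfies this predicate; a struct of three ints is 96 bits, fails
// isRegisterSize, and is returned in memory.
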
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwin) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
                                                           2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = RetTy->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Outside of Darwin, structs and unions are always indirect.
    if (!IsDarwin && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpected single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Ignore empty structs.
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

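// For illustration (not part of the original source): struct { int x;
// float y; } satisfies the Expand rule above and is passed as two
// separate scalar arguments (i32, float), while struct { char c; }
// fails is32Or64BitBasicType and is passed indirectly.
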
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

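// Roughly, the code above implements the classic x86-32 va_arg, where the
// va_list is just a pointer into the stacked arguments:
//   T *p = (T *) ap;
//   ap += (sizeof(T) + 3) & ~3;   // advance by size rounded up to 4
//   return p;
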
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, choose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty,
                               ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
           Accum == X87 || Accum == X87Up)
    return Memory;
  else
    return SSE;
}

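// Worked examples of the merge rules above (not part of the original
// source):
//   merge(NoClass, Integer) == Integer   (rules b/d)
//   merge(SSE, SSE)         == SSE       (rule a)
//   merge(SSE, Integer)     == Integer   (rule d)
//   merge(SSE, X87)         == Memory    (rule e)
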
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
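    // Each element is classified at its own offset and merged into the
    // running (Lo, Hi) pair; the loop bails out as soon as either
    // eightbyte degrades to Memory.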
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(Context),
           e = RD->field_end(Context); i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}

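// For illustration, some classifications this computes (not part of the
// original source):
//   struct { int a, b; }          -> Lo = Integer, Hi = NoClass
//   struct { long a; double b; }  -> Lo = Integer, Hi = SSE
//   long double                   -> Lo = X87,     Hi = X87Up
//   struct { char buf[20]; }      -> Memory (more than two eightbytes)
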
ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
                                          const llvm::Type *CoerceTo,
                                          ASTContext &Context) const {
  if (CoerceTo == llvm::Type::Int64Ty) {
    // Integer and pointer types will end up in a general purpose
    // register.
    if (Ty->isIntegralType() || Ty->isPointerType())
      return ABIArgInfo::getDirect();

  } else if (CoerceTo == llvm::Type::DoubleTy) {
    // FIXME: It would probably be better to make CGFunctionInfo only map using
    // canonical types than to canonicalize here.
    QualType CTy = Context.getCanonicalType(Ty);

    // Float and double end up in a single SSE reg.
    if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
      return ABIArgInfo::getDirect();

  }

  return ABIArgInfo::getCoerce(CoerceTo);
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            ASTContext &Context) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getDirect();

  // FIXME: Set alignment correctly.
  return ABIArgInfo::getIndirect(0);
}

ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectResult(RetTy, Context);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
                                    llvm::Type::X86_FP80Ty,
                                    NULL);
    break;
  }

  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the upper half of the last used SSE register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}

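// For illustration (not part of the original source): _Complex double
// classifies as (SSE, SSE) and is returned as { double, double } in
// %xmm0/%xmm1, while _Complex long double (ComplexX87) is returned on
// the x87 stack via the { x86_fp80, x86_fp80 } result type built above.
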
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty, Context);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::Int64Ty;
    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::DoubleTy;
    break;
  }

  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    ++neededInt;
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}

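// For illustration (not part of the original source): struct { long x;
// double y; } classifies as (Integer, SSE) and is coerced to
// { i64, double } with neededInt == 1 and neededSSE == 1; computeInfo
// below reverts the whole argument to memory if either register class
// is exhausted.
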
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, Context);
    }
  }
}

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    llvm::Type::Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                               (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

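// For illustration (not part of the original source): a 12-byte struct
// fetched here advances overflow_arg_area by 16 bytes ((12 + 7) & ~7),
// keeping the overflow area 8-byte aligned as required by step 10.
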
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
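  // The cases below fetch the eightbytes out of reg_save_area; the mixed
  // Integer+SSE case and the two-SSE-register case go through a temporary
  // alloca because their eightbytes are not contiguous in the save area.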
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
                                                         llvm::Type::DoubleTy,
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}

// ABI Info for PIC16
class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

};

ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context) const {
  return ABIArgInfo::getDirect();
}

llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

class ARMABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    it->info = classifyArgumentType(it->type, Context);
  }
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            ASTContext &Context) const {
  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getDirect();
  }
  // FIXME: This is kind of nasty... but there isn't much choice because the ARM
  // backend doesn't support byval.
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (Context.getTypeAlign(Ty) > 32) {
    ElemTy = llvm::Type::Int64Ty;
    SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
  } else {
    ElemTy = llvm::Type::Int32Ty;
    SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
  }
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
  const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
  return ABIArgInfo::getCoerce(STy);
}

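// For illustration (not part of the original source): a 12-byte struct
// with 4-byte alignment is coerced to { [3 x i32] } by the code above,
// which lets the backend pass it in core registers and/or on the stack
// without byval support.
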
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Aggregates <= 4 bytes are returned in r0; other aggregates
    // are returned indirectly.
    uint64_t Size = Context.getTypeSize(RetTy);
    if (Size <= 32)
      return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
                                              ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                ASTContext &Context) const {
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

const ABIInfo &CodeGenTypes::getABIInfo() const {
  if (TheABIInfo)
    return *TheABIInfo;

  // For now we just cache this in the CodeGenTypes and don't bother
  // to free it.
  const char *TargetPrefix = getContext().Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0) {
    bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
    switch (getContext().Target.getPointerWidth(0)) {
    case 32:
      return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
    case 64:
      return *(TheABIInfo = new X86_64ABIInfo());
    }
  } else if (strcmp(TargetPrefix, "arm") == 0) {
    // FIXME: Support for OABI?
    return *(TheABIInfo = new ARMABIInfo());
  } else if (strcmp(TargetPrefix, "pic16") == 0) {
    return *(TheABIInfo = new PIC16ABIInfo());
  }

  return *(TheABIInfo = new DefaultABIInfo);
}

/***/

CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                               const llvm::SmallVector<QualType, 16> &ArgTys) {
  NumArgs = ArgTys.size();
  Args = new ArgInfo[1 + NumArgs];
  Args[0].type = ResTy;
  for (unsigned i = 0; i < NumArgs; ++i)
    Args[1 + i].type = ArgTys[i];
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                    std::vector<const llvm::Type*> &ArgTys) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType FT = FD->getType();
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      GetExpandedTypes(FT, ArgTys);
    } else {
      ArgTys.push_back(ConvertType(FT));
    }
  }
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

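// For illustration (not part of the original source): expanding
// struct { int a; float b; } turns one Clang-level argument into two IR
// arguments (i32, float); ExpandTypeFromArgs stores those back
// field-by-field in the callee, and ExpandTypeToArgs loads them
// field-by-field at the call site.
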
1584/// 1585/// This safely handles the case when the src type is smaller than the 1586/// destination type; in this situation the values of bits which not 1587/// present in the src are undefined. 1588static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, 1589 const llvm::Type *Ty, 1590 CodeGenFunction &CGF) { 1591 const llvm::Type *SrcTy = 1592 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 1593 uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); 1594 uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty); 1595 1596 // If load is legal, just bitcast the src pointer. 1597 if (SrcSize >= DstSize) { 1598 // Generally SrcSize is never greater than DstSize, since this means we are 1599 // losing bits. However, this can happen in cases where the structure has 1600 // additional padding, for example due to a user specified alignment. 1601 // 1602 // FIXME: Assert that we aren't truncating non-padding bits when have access 1603 // to that information. 1604 llvm::Value *Casted = 1605 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); 1606 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); 1607 // FIXME: Use better alignment / avoid requiring aligned load. 1608 Load->setAlignment(1); 1609 return Load; 1610 } else { 1611 // Otherwise do coercion through memory. This is stupid, but 1612 // simple. 1613 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); 1614 llvm::Value *Casted = 1615 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy)); 1616 llvm::StoreInst *Store = 1617 CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted); 1618 // FIXME: Use better alignment / avoid requiring aligned store. 1619 Store->setAlignment(1); 1620 return CGF.Builder.CreateLoad(Tmp); 1621 } 1622} 1623 1624/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 1625/// where the source and destination may have different types. 1626/// 1627/// This safely handles the case when the src type is larger than the 1628/// destination type; the upper bits of the src will be lost. 1629static void CreateCoercedStore(llvm::Value *Src, 1630 llvm::Value *DstPtr, 1631 CodeGenFunction &CGF) { 1632 const llvm::Type *SrcTy = Src->getType(); 1633 const llvm::Type *DstTy = 1634 cast<llvm::PointerType>(DstPtr->getType())->getElementType(); 1635 1636 uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); 1637 uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy); 1638 1639 // If store is legal, just bitcast the src pointer. 1640 if (SrcSize >= DstSize) { 1641 // Generally SrcSize is never greater than DstSize, since this means we are 1642 // losing bits. However, this can happen in cases where the structure has 1643 // additional padding, for example due to a user specified alignment. 1644 // 1645 // FIXME: Assert that we aren't truncating non-padding bits when have access 1646 // to that information. 1647 llvm::Value *Casted = 1648 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy)); 1649 // FIXME: Use better alignment / avoid requiring aligned store. 1650 CGF.Builder.CreateStore(Src, Casted)->setAlignment(1); 1651 } else { 1652 // Otherwise do coercion through memory. This is stupid, but 1653 // simple. 
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
  std::vector<const llvm::Type*> ArgTys;

  const llvm::Type *ResultType = 0;

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");

  case ABIArgInfo::Direct:
    ResultType = ConvertType(RetTy);
    break;

  case ABIArgInfo::Indirect: {
    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
    ResultType = llvm::Type::VoidTy;
    const llvm::Type *STy = ConvertType(RetTy);
    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
    break;
  }

  case ABIArgInfo::Ignore:
    ResultType = llvm::Type::VoidTy;
    break;

  case ABIArgInfo::Coerce:
    ResultType = RetAI.getCoerceToType();
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &AI = it->info;

    switch (AI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      ArgTys.push_back(AI.getCoerceToType());
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
      break;
    }

    case ABIArgInfo::Direct:
      ArgTys.push_back(ConvertType(it->type));
      break;

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, ArgTys);
      break;
    }
  }

  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  // FIXME: handle sseregparm someday...
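  // Map declaration-level attributes on the target decl to LLVM function
  // attributes; e.g. __attribute__((noreturn)) becomes
  // llvm::Attribute::NoReturn and __attribute__((const)) becomes
  // llvm::Attribute::ReadNone.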
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Direct:
    if (RetTy->isPromotableIntegerType()) {
      if (RetTy->isSignedIntegerType()) {
        RetAttrs |= llvm::Attribute::SExt;
      } else if (RetTy->isUnsignedIntegerType()) {
        RetAttrs |= llvm::Attribute::ZExt;
      }
    }
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet |
                                                llvm::Attribute::NoAlias));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Ignore:
  case ABIArgInfo::Coerce:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: we need to honour command line settings also...
  // FIXME: RegParm should be reduced in case of nested functions and/or global
  // register variables.
  signed RegParm = 0;
  if (TargetDecl)
    if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
      RegParm = RegParmAttr->getNumParams();

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    switch (AI.getKind()) {
    case ABIArgInfo::Coerce:
      break;

    case ABIArgInfo::Indirect:
      Attributes |= llvm::Attribute::ByVal;
      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Direct:
      if (ParamType->isPromotableIntegerType()) {
        if (ParamType->isSignedIntegerType()) {
          Attributes |= llvm::Attribute::SExt;
        } else if (ParamType->isUnsignedIntegerType()) {
          Attributes |= llvm::Attribute::ZExt;
        }
      }
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
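      // Expansion yields one LLVM parameter per scalar leaf of the
      // aggregate, so the attribute index must advance past all of them to
      // keep later parameter attributes aligned.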
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
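      // (Arguments are typically classified Ignore when they occupy no
      // storage, e.g. an empty record, so there is no IR value to consume
      // here.)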
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Direct:
      // The internal return value temp will always have
      // pointer-to-return-type type.
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call.
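    // E.g., a call to 'struct S f(void)' lowers roughly to
    //   %tmp = alloca %struct.S
    //   call void @f(%struct.S* %tmp)
    // with the callee writing its result through the hidden pointer.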
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
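    // GetUndefRValue manufactures an undef value of the appropriate kind
    // (scalar, complex, or aggregate) so the caller still sees a
    // type-correct result on this unreachable path.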
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // Even though the call's result is ignored, make sure to construct the
    // appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}