X86ISelLowering.h revision 243830
//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This
      /// corresponds to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This
      /// corresponds to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This
      /// corresponds to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular
      /// the operands of this node are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,
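
      // A schematic sketch (not an enumerator) of how a lowering routine
      // might create this node. `Chain', `Callee', and `dl' are assumed to
      // be in scope, and the real LowerCall in X86ISelLowering.cpp also
      // appends register-copy, register-mask, and glue operands:
      //
      //   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
      //   SDValue Ops[] = { Chain, Callee };
      //   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops, 2);
      //   SDValue InFlag = Chain.getValue(1); // glue for result copies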

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with a sbb and the value is
      // all ones or all zeros.
      SETCC_CARRY, // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      FSETCCss, FSETCCsd,

      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// placing the result in an integer GPR. Needs masking for scalar
      /// result.
      FGETSIGNx86,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is
      /// the flag operand produced by a CMP or TEST instruction. It also
      /// writes a flag result.
      CMOV,
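
      // For example (a sketch; `LHS', `RHS', and `dl' assumed): a boolean
      // is typically materialized by pairing X86ISD::CMP with X86ISD::SETCC,
      // whose EFLAGS operand likewise feeds CMOV and BRCOND:
      //
      //   SDValue Cmp   = DAG.getNode(X86ISD::CMP, dl, MVT::i32, LHS, RHS);
      //   SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
      //                               DAG.getConstant(X86::COND_E, MVT::i8),
      //                               Cmp);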

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch if condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the
      /// popl at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// MMX_MOVD2W - Copies a 32-bit value from the low word of a MMX
      /// vector to a GPR.
      MMX_MOVD2W,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,

      /// BLENDV - Blend where the selector is an XMM.
      BLENDV,

      /// BLENDxx - Blend where the selector is an immediate.
      BLENDPW,
      BLENDPS,
      BLENDPD,

      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// FMAX, FMIN - Floating point max and min.
      ///
      FMAX, FMIN,

      /// FMAXC, FMINC - Commutative FMIN and FMAX.
      FMAXC, FMINC,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSBASEADDR - Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.
      TLSBASEADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
      // the address supplied by an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSEXT_MOVL - Vector move low and sign extend.
      VSEXT_MOVL,

      // VZEXT - Vector integer zero-extend.
      VZEXT,

      // VSEXT - Vector integer signed-extend.
      VSEXT,

      // VFPEXT - Vector FP extend.
      VFPEXT,

      // VFPROUND - Vector FP round.
      VFPROUND,

      // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift.
      VSHLDQ, VSRLDQ,

      // VSHL, VSRL, VSRA - Vector shift elements.
      VSHL, VSRL, VSRA,

      // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate.
      VSHLI, VSRLI, VSRAI,

      // CMPP - Vector packed double/float comparison.
      CMPP,

      // PCMP* - Vector integer comparisons.
      PCMPEQ, PCMPGT,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      ANDN, // ANDN - Bitwise AND NOT with FLAGS results.

      BLSI,   // BLSI - Extract lowest set isolated bit.
      BLSMSK, // BLSMSK - Get mask up to lowest set bit.
      BLSR,   // BLSR - Reset lowest set bit.

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS
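
      // A sketch of how these FLAGS-producing nodes are built (names
      // hypothetical): the extra i32 result models EFLAGS and is consumed
      // by SETCC, CMOV, or BRCOND.
      //
      //   SDVTList VTs   = DAG.getVTList(MVT::i32, MVT::i32);
      //   SDValue  Sum   = DAG.getNode(X86ISD::ADD, dl, VTs, LHS, RHS);
      //   SDValue  Flags = Sum.getValue(1);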

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGN,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      SHUFP,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKL,
      UNPCKH,
      VPERMILP,
      VPERMV,
      VPERMI,
      VPERM2X128,
      VBROADCAST,

      // PMULUDQ - Vector multiply packed unsigned doubleword integers.
      PMULUDQ,

      // FMA nodes.
      FMADD,
      FNMADD,
      FMSUB,
      FNMSUB,
      FMADDSUB,
      FMSUBADD,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // SEG_ALLOCA - For allocating variable amounts of stack space when
      // using segmented stacks. Checks if the current stacklet has enough
      // space, and falls back to heap allocation if not.
      SEG_ALLOCA,

      // WIN_FTOL - Windows's _ftol2 runtime routine to do fptoui.
      WIN_FTOL,

      // Memory barriers.
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // FNSTSW16r - Store FP status word into i16 register.
      FNSTSW16r,

      // SAHF - Store contents of %ah into %eflags.
      SAHF,

      // RDRAND - Get a random integer and indicate whether it is valid
      // in CF.
      RDRAND,

      // PCMP*STRI
      PCMPISTRI,
      PCMPESTRI,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMMAX64_DAG,
      ATOMMIN64_DAG,
      ATOMUMAX64_DAG,
      ATOMUMIN64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff.
      /// It has two inputs (token chain and address) and two outputs (int
      /// value and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain).
      /// FILD_FLAG also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack
      /// slots. This corresponds to the X86::FLD32m / X86::FLD64m. It takes
      /// a chain operand, ptr to load from, and a ValueType node indicating
      /// the type to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);

    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);

    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);
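
    // Worked example: VEXTRACTF128's immediate selects a 128-bit half of a
    // 256-bit vector, so for (extract_subvector (v8f32 X), 4) the start
    // element lies 4 * 32 = 128 bits in, and
    // getExtractVEXTRACTF128Immediate returns 1; an index of 0 would give 0.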

    /// isZeroNode - Returns true if Elt is a constant zero or a floating
    /// point constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset fits
    /// into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);


    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal
    /// aggregate function arguments in the caller parameter area. For X86,
    /// aggregates that contain SSE vectors are placed at 16-byte boundaries
    /// while the rest are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for
    /// load and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against an alignment requirement, probably because the
    /// source does not need to be loaded. If 'IsZeroVal' is true, that means
    /// it's safe to return a non-scalar-integer type, e.g. empty string
    /// source, constant, or loaded from memory. 'MemcpyStrSrc' indicates
    /// whether the memcpy source is constant so it does not need to be
    /// loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsZeroVal, bool MemcpyStrSrc,
                        MachineFunction &MF) const;
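
    // For instance (a sketch; the result depends on the subtarget), a
    // 16-byte zero fill with a 16-byte-aligned destination may yield a
    // vector type once SSE2 is available:
    //
    //   EVT VT = TLI.getOptimalMemOpType(/*Size=*/16, /*DstAlign=*/16,
    //                                    /*SrcAlign=*/0, /*IsZeroVal=*/true,
    //                                    /*MemcpyStrSrc=*/false, MF);
    //   // e.g. a 128-bit vector type here; MVT::Other defers to the
    //   // generic, target-independent selection instead.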

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;


    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if it is profitable to promote
    /// the given operation to a wider type, and set PVT to the desired
    /// promotion type. e.g. On x86 i16 ops are promoted to i32 since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI,
                                MachineBasicBlock *MBB) const;


    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for
    /// the register. This should only be used for C_Register constraints.
    /// On error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
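
    // A sketch of a query against the x86 "base + scale*index + disp" form
    // (`TLI' and `Ty' assumed); scales of 1, 2, 4, and 8 are encodable:
    //
    //   TargetLowering::AddrMode AM;
    //   AM.BaseGV     = 0;
    //   AM.BaseOffs   = 16;   // displacement
    //   AM.HasBaseReg = true;
    //   AM.Scale      = 8;    // a scale of 3 would make this illegal
    //   bool Legal = TLI.isLegalAddressingMode(AM, Ty);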

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value
    /// in register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
    /// result register. This does not necessarily include registers defined
    /// in unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
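
    // For example, on x86-64 "movl %edi, %eax" already clears bits 63:32 of
    // %rax, so isZExtFree(i32, i64) holds:
    //
    //   %z = zext i32 %x to i64
    //
    // costs no extra instruction when %x is defined by a 32-bit operation.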

    /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster
    /// than a pair of mul and add instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true (and FMAs are legal),
    /// otherwise fmuladd is expanded to mul + add.
    virtual bool isFMAFasterThanMulAndAdd(EVT) const { return true; }

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
    /// use this to indicate if there is a suitable VECTOR_SHUFFLE that can
    /// be used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type
    /// is computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 if SSE2 is present
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 if SSE1 is present
    }

    /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
    /// for fptoui.
    bool isTargetFTOL() const {
      return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
    }

    /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
    /// used for fptoui to the given type.
    bool isIntegerTypeFTOL(EVT VT) const {
      return isTargetFTOL() && VT == MVT::i64;
    }

    /// createFastISel - This method returns a target specific FastISel
    /// object, or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const DataLayout *TD;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
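
    // Typically populated from the constructor; a sketch of the pattern
    // (the exact set depends on whether x87 or SSE is in use):
    //
    //   addLegalFPImmediate(APFloat(+0.0)); // x87 fldz / SSE xorps
    //   addLegalFPImmediate(APFloat(+1.0)); // x87 fld1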

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           Type *RetTy,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall,
                                    bool Is64Bit, int FPDiff,
                                    DebugLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned,
                                               bool isReplace) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR.
    SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const;
    SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
    SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const;

    SDValue lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
    LowerFormalArguments(SDValue Chain,
                         CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         DebugLoc dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
    LowerCall(CallLoweringInfo &CLI,
              SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
    LowerReturn(SDValue Chain,
                CallingConv::ID CallConv, bool isVarArg,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                DebugLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    virtual EVT
    getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                             ISD::NodeType ExtendKind) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, max, min, umax, umin). It takes the corresponding instruction to
    /// expand, the associated machine basic block, and the associated X86
    /// opcodes for reg/reg.
    MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, add, sub, swap) for 64-bit operands on 32-bit target.
    MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI,
                                               MachineBasicBlock *MBB) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                         MachineBasicBlock *MBB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
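
    // A sketch of what EmitTest reduces to in the simplest case (the real
    // code also reuses EFLAGS from a prior arithmetic node when it can):
    // comparing a value against zero is selected as "test Op0,Op0" rather
    // than "cmp $0,Op0".
    //
    //   return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0,
    //                      DAG.getConstant(0, Op0.getValueType()));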

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }

  class X86VectorTargetTransformInfo : public VectorTargetTransformImpl {
  public:
    explicit X86VectorTargetTransformInfo(const TargetLowering *TL) :
      VectorTargetTransformImpl(TL) {}

    virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;

    virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) const;

    unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                Type *CondTy) const;

    virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                      Type *Src) const;
  };
}
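
// A usage sketch (hypothetical caller; `XTL' is an X86TargetLowering and
// `Ctx' an LLVMContext): cost queries take IR opcodes and types, e.g. the
// cost of a <4 x i32> add:
//
//   X86VectorTargetTransformInfo TTI(&XTL);
//   Type *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 4);
//   unsigned Cost = TTI.getArithmeticInstrCost(Instruction::Add, VecTy);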

#endif // X86ISELLOWERING_H