// X86ISelLowering.h — LLVM revision 219077 (provenance note from extraction).
1//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that X86 uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#ifndef X86ISELLOWERING_H 16#define X86ISELLOWERING_H 17 18#include "X86Subtarget.h" 19#include "X86RegisterInfo.h" 20#include "X86MachineFunctionInfo.h" 21#include "llvm/Target/TargetLowering.h" 22#include "llvm/Target/TargetOptions.h" 23#include "llvm/CodeGen/FastISel.h" 24#include "llvm/CodeGen/SelectionDAG.h" 25#include "llvm/CodeGen/CallingConvLower.h" 26 27namespace llvm { 28 namespace X86ISD { 29 // X86 Specific DAG Nodes 30 enum NodeType { 31 // Start the numbering where the builtin ops leave off. 32 FIRST_NUMBER = ISD::BUILTIN_OP_END, 33 34 /// BSF - Bit scan forward. 35 /// BSR - Bit scan reverse. 36 BSF, 37 BSR, 38 39 /// SHLD, SHRD - Double shift instructions. These correspond to 40 /// X86::SHLDxx and X86::SHRDxx instructions. 41 SHLD, 42 SHRD, 43 44 /// FAND - Bitwise logical AND of floating point values. This corresponds 45 /// to X86::ANDPS or X86::ANDPD. 46 FAND, 47 48 /// FOR - Bitwise logical OR of floating point values. This corresponds 49 /// to X86::ORPS or X86::ORPD. 50 FOR, 51 52 /// FXOR - Bitwise logical XOR of floating point values. This corresponds 53 /// to X86::XORPS or X86::XORPD. 54 FXOR, 55 56 /// FSRL - Bitwise logical right shift of floating point values. These 57 /// corresponds to X86::PSRLDQ. 58 FSRL, 59 60 /// CALL - These operations represent an abstract X86 call 61 /// instruction, which includes a bunch of information. 
In particular the 62 /// operands of these node are: 63 /// 64 /// #0 - The incoming token chain 65 /// #1 - The callee 66 /// #2 - The number of arg bytes the caller pushes on the stack. 67 /// #3 - The number of arg bytes the callee pops off the stack. 68 /// #4 - The value to pass in AL/AX/EAX (optional) 69 /// #5 - The value to pass in DL/DX/EDX (optional) 70 /// 71 /// The result values of these nodes are: 72 /// 73 /// #0 - The outgoing token chain 74 /// #1 - The first register result value (optional) 75 /// #2 - The second register result value (optional) 76 /// 77 CALL, 78 79 /// RDTSC_DAG - This operation implements the lowering for 80 /// readcyclecounter 81 RDTSC_DAG, 82 83 /// X86 compare and logical compare instructions. 84 CMP, COMI, UCOMI, 85 86 /// X86 bit-test instructions. 87 BT, 88 89 /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS 90 /// operand, usually produced by a CMP instruction. 91 SETCC, 92 93 // Same as SETCC except it's materialized with a sbb and the value is all 94 // one's or all zero's. 95 SETCC_CARRY, // R = carry_bit ? ~0 : 0 96 97 /// X86 conditional moves. Operand 0 and operand 1 are the two values 98 /// to select from. Operand 2 is the condition code, and operand 3 is the 99 /// flag operand produced by a CMP or TEST instruction. It also writes a 100 /// flag result. 101 CMOV, 102 103 /// X86 conditional branches. Operand 0 is the chain operand, operand 1 104 /// is the block to branch if condition is true, operand 2 is the 105 /// condition code, and operand 3 is the flag operand produced by a CMP 106 /// or TEST instruction. 107 BRCOND, 108 109 /// Return with a flag operand. Operand 0 is the chain operand, operand 110 /// 1 is the number of bytes of stack to pop. 111 RET_FLAG, 112 113 /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx. 114 REP_STOS, 115 116 /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx. 
117 REP_MOVS, 118 119 /// GlobalBaseReg - On Darwin, this node represents the result of the popl 120 /// at function entry, used for PIC code. 121 GlobalBaseReg, 122 123 /// Wrapper - A wrapper node for TargetConstantPool, 124 /// TargetExternalSymbol, and TargetGlobalAddress. 125 Wrapper, 126 127 /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP 128 /// relative displacements. 129 WrapperRIP, 130 131 /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word 132 /// of an XMM vector, with the high word zero filled. 133 MOVQ2DQ, 134 135 /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector 136 /// to an MMX vector. If you think this is too close to the previous 137 /// mnemonic, so do I; blame Intel. 138 MOVDQ2Q, 139 140 /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to 141 /// i32, corresponds to X86::PEXTRB. 142 PEXTRB, 143 144 /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to 145 /// i32, corresponds to X86::PEXTRW. 146 PEXTRW, 147 148 /// INSERTPS - Insert any element of a 4 x float vector into any element 149 /// of a destination 4 x floatvector. 150 INSERTPS, 151 152 /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector, 153 /// corresponds to X86::PINSRB. 154 PINSRB, 155 156 /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector, 157 /// corresponds to X86::PINSRW. 158 PINSRW, MMX_PINSRW, 159 160 /// PSHUFB - Shuffle 16 8-bit values within a vector. 161 PSHUFB, 162 163 /// PANDN - and with not'd value. 164 PANDN, 165 166 /// PSIGNB/W/D - Copy integer sign. 167 PSIGNB, PSIGNW, PSIGND, 168 169 /// PBLENDVB - Variable blend 170 PBLENDVB, 171 172 /// FMAX, FMIN - Floating point max and min. 173 /// 174 FMAX, FMIN, 175 176 /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal 177 /// approximation. Note that these typically require refinement 178 /// in order to obtain suitable precision. 
179 FRSQRT, FRCP, 180 181 // TLSADDR - Thread Local Storage. 182 TLSADDR, 183 184 // TLSCALL - Thread Local Storage. When calling to an OS provided 185 // thunk at the address from an earlier relocation. 186 TLSCALL, 187 188 // EH_RETURN - Exception Handling helpers. 189 EH_RETURN, 190 191 /// TC_RETURN - Tail call return. 192 /// operand #0 chain 193 /// operand #1 callee (register or absolute) 194 /// operand #2 stack adjustment 195 /// operand #3 optional in flag 196 TC_RETURN, 197 198 // VZEXT_MOVL - Vector move low and zero extend. 199 VZEXT_MOVL, 200 201 // VSHL, VSRL - Vector logical left / right shift. 202 VSHL, VSRL, 203 204 // CMPPD, CMPPS - Vector double/float comparison. 205 // CMPPD, CMPPS - Vector double/float comparison. 206 CMPPD, CMPPS, 207 208 // PCMP* - Vector integer comparisons. 209 PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ, 210 PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ, 211 212 // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results. 213 ADD, SUB, ADC, SBB, SMUL, 214 INC, DEC, OR, XOR, AND, 215 216 UMUL, // LOW, HI, FLAGS = umul LHS, RHS 217 218 // MUL_IMM - X86 specific multiply by immediate. 219 MUL_IMM, 220 221 // PTEST - Vector bitwise comparisons 222 PTEST, 223 224 // TESTP - Vector packed fp sign bitwise comparisons 225 TESTP, 226 227 // Several flavors of instructions with vector shuffle behaviors. 228 PALIGN, 229 PSHUFD, 230 PSHUFHW, 231 PSHUFLW, 232 PSHUFHW_LD, 233 PSHUFLW_LD, 234 SHUFPD, 235 SHUFPS, 236 MOVDDUP, 237 MOVSHDUP, 238 MOVSLDUP, 239 MOVSHDUP_LD, 240 MOVSLDUP_LD, 241 MOVLHPS, 242 MOVLHPD, 243 MOVHLPS, 244 MOVHLPD, 245 MOVLPS, 246 MOVLPD, 247 MOVSD, 248 MOVSS, 249 UNPCKLPS, 250 UNPCKLPD, 251 VUNPCKLPS, 252 VUNPCKLPD, 253 VUNPCKLPSY, 254 VUNPCKLPDY, 255 UNPCKHPS, 256 UNPCKHPD, 257 PUNPCKLBW, 258 PUNPCKLWD, 259 PUNPCKLDQ, 260 PUNPCKLQDQ, 261 PUNPCKHBW, 262 PUNPCKHWD, 263 PUNPCKHDQ, 264 PUNPCKHQDQ, 265 266 // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack, 267 // according to %al. 
An operator is needed so that this can be expanded 268 // with control flow. 269 VASTART_SAVE_XMM_REGS, 270 271 // WIN_ALLOCA - Windows's _chkstk call to do stack probing. 272 WIN_ALLOCA, 273 274 // Memory barrier 275 MEMBARRIER, 276 MFENCE, 277 SFENCE, 278 LFENCE, 279 280 // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG, 281 // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG - 282 // Atomic 64-bit binary operations. 283 ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE, 284 ATOMSUB64_DAG, 285 ATOMOR64_DAG, 286 ATOMXOR64_DAG, 287 ATOMAND64_DAG, 288 ATOMNAND64_DAG, 289 ATOMSWAP64_DAG, 290 291 // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap. 292 LCMPXCHG_DAG, 293 LCMPXCHG8_DAG, 294 295 // VZEXT_LOAD - Load, scalar_to_vector, and zero extend. 296 VZEXT_LOAD, 297 298 // FNSTCW16m - Store FP control world into i16 memory. 299 FNSTCW16m, 300 301 /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the 302 /// integer destination in memory and a FP reg source. This corresponds 303 /// to the X86::FIST*m instructions and the rounding mode change stuff. It 304 /// has two inputs (token chain and address) and two outputs (int value 305 /// and token chain). 306 FP_TO_INT16_IN_MEM, 307 FP_TO_INT32_IN_MEM, 308 FP_TO_INT64_IN_MEM, 309 310 /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the 311 /// integer source in memory and FP reg result. This corresponds to the 312 /// X86::FILD*m instructions. It has three inputs (token chain, address, 313 /// and source type) and two outputs (FP value and token chain). FILD_FLAG 314 /// also produces a flag). 315 FILD, 316 FILD_FLAG, 317 318 /// FLD - This instruction implements an extending load to FP stack slots. 319 /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain 320 /// operand, ptr to load from, and a ValueType node indicating the type 321 /// to load to. 322 FLD, 323 324 /// FST - This instruction implements a truncating store to FP stack 325 /// slots. 
This corresponds to the X86::FST32m / X86::FST64m. It takes a 326 /// chain operand, value to store, address, and a ValueType to store it 327 /// as. 328 FST, 329 330 /// VAARG_64 - This instruction grabs the address of the next argument 331 /// from a va_list. (reads and modifies the va_list in memory) 332 VAARG_64 333 334 // WARNING: Do not add anything in the end unless you want the node to 335 // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be 336 // thought as target memory ops! 337 }; 338 } 339 340 /// Define some predicates that are used for node matching. 341 namespace X86 { 342 /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand 343 /// specifies a shuffle of elements that is suitable for input to PSHUFD. 344 bool isPSHUFDMask(ShuffleVectorSDNode *N); 345 346 /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand 347 /// specifies a shuffle of elements that is suitable for input to PSHUFD. 348 bool isPSHUFHWMask(ShuffleVectorSDNode *N); 349 350 /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand 351 /// specifies a shuffle of elements that is suitable for input to PSHUFD. 352 bool isPSHUFLWMask(ShuffleVectorSDNode *N); 353 354 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand 355 /// specifies a shuffle of elements that is suitable for input to SHUFP*. 356 bool isSHUFPMask(ShuffleVectorSDNode *N); 357 358 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand 359 /// specifies a shuffle of elements that is suitable for input to MOVHLPS. 360 bool isMOVHLPSMask(ShuffleVectorSDNode *N); 361 362 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form 363 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. 
vector_shuffle v, undef, 364 /// <2, 3, 2, 3> 365 bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N); 366 367 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand 368 /// specifies a shuffle of elements that is suitable for MOVLP{S|D}. 369 bool isMOVLPMask(ShuffleVectorSDNode *N); 370 371 /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand 372 /// specifies a shuffle of elements that is suitable for MOVHP{S|D}. 373 /// as well as MOVLHPS. 374 bool isMOVLHPSMask(ShuffleVectorSDNode *N); 375 376 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand 377 /// specifies a shuffle of elements that is suitable for input to UNPCKL. 378 bool isUNPCKLMask(ShuffleVectorSDNode *N, bool V2IsSplat = false); 379 380 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand 381 /// specifies a shuffle of elements that is suitable for input to UNPCKH. 382 bool isUNPCKHMask(ShuffleVectorSDNode *N, bool V2IsSplat = false); 383 384 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form 385 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, 386 /// <0, 0, 1, 1> 387 bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N); 388 389 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form 390 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, 391 /// <2, 2, 3, 3> 392 bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N); 393 394 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand 395 /// specifies a shuffle of elements that is suitable for input to MOVSS, 396 /// MOVSD, and MOVD, i.e. setting the lowest element. 397 bool isMOVLMask(ShuffleVectorSDNode *N); 398 399 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand 400 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP. 
401 bool isMOVSHDUPMask(ShuffleVectorSDNode *N); 402 403 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand 404 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP. 405 bool isMOVSLDUPMask(ShuffleVectorSDNode *N); 406 407 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand 408 /// specifies a shuffle of elements that is suitable for input to MOVDDUP. 409 bool isMOVDDUPMask(ShuffleVectorSDNode *N); 410 411 /// isPALIGNRMask - Return true if the specified VECTOR_SHUFFLE operand 412 /// specifies a shuffle of elements that is suitable for input to PALIGNR. 413 bool isPALIGNRMask(ShuffleVectorSDNode *N); 414 415 /// isVEXTRACTF128Index - Return true if the specified 416 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is 417 /// suitable for input to VEXTRACTF128. 418 bool isVEXTRACTF128Index(SDNode *N); 419 420 /// isVINSERTF128Index - Return true if the specified 421 /// INSERT_SUBVECTOR operand specifies a subvector insert that is 422 /// suitable for input to VINSERTF128. 423 bool isVINSERTF128Index(SDNode *N); 424 425 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle 426 /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP* 427 /// instructions. 428 unsigned getShuffleSHUFImmediate(SDNode *N); 429 430 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle 431 /// the specified VECTOR_SHUFFLE mask with PSHUFHW instruction. 432 unsigned getShufflePSHUFHWImmediate(SDNode *N); 433 434 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle 435 /// the specified VECTOR_SHUFFLE mask with PSHUFLW instruction. 436 unsigned getShufflePSHUFLWImmediate(SDNode *N); 437 438 /// getShufflePALIGNRImmediate - Return the appropriate immediate to shuffle 439 /// the specified VECTOR_SHUFFLE mask with the PALIGNR instruction. 
440 unsigned getShufflePALIGNRImmediate(SDNode *N); 441 442 /// getExtractVEXTRACTF128Immediate - Return the appropriate 443 /// immediate to extract the specified EXTRACT_SUBVECTOR index 444 /// with VEXTRACTF128 instructions. 445 unsigned getExtractVEXTRACTF128Immediate(SDNode *N); 446 447 /// getInsertVINSERTF128Immediate - Return the appropriate 448 /// immediate to insert at the specified INSERT_SUBVECTOR index 449 /// with VINSERTF128 instructions. 450 unsigned getInsertVINSERTF128Immediate(SDNode *N); 451 452 /// isZeroNode - Returns true if Elt is a constant zero or a floating point 453 /// constant +0.0. 454 bool isZeroNode(SDValue Elt); 455 456 /// isOffsetSuitableForCodeModel - Returns true of the given offset can be 457 /// fit into displacement field of the instruction. 458 bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, 459 bool hasSymbolicDisplacement = true); 460 } 461 462 //===--------------------------------------------------------------------===// 463 // X86TargetLowering - X86 Implementation of the TargetLowering interface 464 class X86TargetLowering : public TargetLowering { 465 public: 466 explicit X86TargetLowering(X86TargetMachine &TM); 467 468 virtual unsigned getJumpTableEncoding() const; 469 470 virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; } 471 472 virtual const MCExpr * 473 LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, 474 const MachineBasicBlock *MBB, unsigned uid, 475 MCContext &Ctx) const; 476 477 /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC 478 /// jumptable. 479 virtual SDValue getPICJumpTableRelocBase(SDValue Table, 480 SelectionDAG &DAG) const; 481 virtual const MCExpr * 482 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 483 unsigned JTI, MCContext &Ctx) const; 484 485 /// getStackPtrReg - Return the stack pointer register we are using: either 486 /// ESP or RSP. 
487 unsigned getStackPtrReg() const { return X86StackPtr; } 488 489 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 490 /// function arguments in the caller parameter area. For X86, aggregates 491 /// that contains are placed at 16-byte boundaries while the rest are at 492 /// 4-byte boundaries. 493 virtual unsigned getByValTypeAlignment(const Type *Ty) const; 494 495 /// getOptimalMemOpType - Returns the target specific optimal type for load 496 /// and store operations as a result of memset, memcpy, and memmove 497 /// lowering. If DstAlign is zero that means it's safe to destination 498 /// alignment can satisfy any constraint. Similarly if SrcAlign is zero it 499 /// means there isn't a need to check it against alignment requirement, 500 /// probably because the source does not need to be loaded. If 501 /// 'NonScalarIntSafe' is true, that means it's safe to return a 502 /// non-scalar-integer type, e.g. empty string source, constant, or loaded 503 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is 504 /// constant so it does not need to be loaded. 505 /// It returns EVT::Other if the type should be determined using generic 506 /// target-independent logic. 507 virtual EVT 508 getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, 509 bool NonScalarIntSafe, bool MemcpyStrSrc, 510 MachineFunction &MF) const; 511 512 /// allowsUnalignedMemoryAccesses - Returns true if the target allows 513 /// unaligned memory accesses. of the specified type. 514 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const { 515 return true; 516 } 517 518 /// LowerOperation - Provide custom lowering hooks for some operations. 519 /// 520 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 521 522 /// ReplaceNodeResults - Replace the results of node with an illegal result 523 /// type with new values built out of custom code. 
524 /// 525 virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results, 526 SelectionDAG &DAG) const; 527 528 529 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; 530 531 /// isTypeDesirableForOp - Return true if the target has native support for 532 /// the specified value type and it is 'desirable' to use the type for the 533 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16 534 /// instruction encodings are longer and some i16 instructions are slow. 535 virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const; 536 537 /// isTypeDesirable - Return true if the target has native support for the 538 /// specified value type and it is 'desirable' to use the type. e.g. On x86 539 /// i16 is legal, but undesirable since i16 instruction encodings are longer 540 /// and some i16 instructions are slow. 541 virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const; 542 543 virtual MachineBasicBlock * 544 EmitInstrWithCustomInserter(MachineInstr *MI, 545 MachineBasicBlock *MBB) const; 546 547 548 /// getTargetNodeName - This method returns the name of a target specific 549 /// DAG node. 550 virtual const char *getTargetNodeName(unsigned Opcode) const; 551 552 /// getSetCCResultType - Return the ISD::SETCC ValueType 553 virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const; 554 555 /// computeMaskedBitsForTargetNode - Determine which of the bits specified 556 /// in Mask are known to be either zero or one and return them in the 557 /// KnownZero/KnownOne bitsets. 558 virtual void computeMaskedBitsForTargetNode(const SDValue Op, 559 const APInt &Mask, 560 APInt &KnownZero, 561 APInt &KnownOne, 562 const SelectionDAG &DAG, 563 unsigned Depth = 0) const; 564 565 // ComputeNumSignBitsForTargetNode - Determine the number of bits in the 566 // operation that are sign bits. 
567 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, 568 unsigned Depth) const; 569 570 virtual bool 571 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const; 572 573 SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const; 574 575 virtual bool ExpandInlineAsm(CallInst *CI) const; 576 577 ConstraintType getConstraintType(const std::string &Constraint) const; 578 579 /// Examine constraint string and operand type and determine a weight value. 580 /// The operand object must already have been set up with the operand type. 581 virtual ConstraintWeight getSingleConstraintMatchWeight( 582 AsmOperandInfo &info, const char *constraint) const; 583 584 std::vector<unsigned> 585 getRegClassForInlineAsmConstraint(const std::string &Constraint, 586 EVT VT) const; 587 588 virtual const char *LowerXConstraint(EVT ConstraintVT) const; 589 590 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 591 /// vector. If it is invalid, don't add anything to Ops. If hasMemory is 592 /// true it means one of the asm constraint of the inline asm instruction 593 /// being processed is 'm'. 594 virtual void LowerAsmOperandForConstraint(SDValue Op, 595 char ConstraintLetter, 596 std::vector<SDValue> &Ops, 597 SelectionDAG &DAG) const; 598 599 /// getRegForInlineAsmConstraint - Given a physical register constraint 600 /// (e.g. {edx}), return the register number and the register class for the 601 /// register. This should only be used for C_Register constraints. On 602 /// error, this returns a register number of 0. 603 std::pair<unsigned, const TargetRegisterClass*> 604 getRegForInlineAsmConstraint(const std::string &Constraint, 605 EVT VT) const; 606 607 /// isLegalAddressingMode - Return true if the addressing mode represented 608 /// by AM is legal for this target, for a load/store of the specified type. 
609 virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const; 610 611 /// isTruncateFree - Return true if it's free to truncate a value of 612 /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in 613 /// register EAX to i16 by referencing its sub-register AX. 614 virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const; 615 virtual bool isTruncateFree(EVT VT1, EVT VT2) const; 616 617 /// isZExtFree - Return true if any actual instruction that defines a 618 /// value of type Ty1 implicit zero-extends the value to Ty2 in the result 619 /// register. This does not necessarily include registers defined in 620 /// unknown ways, such as incoming arguments, or copies from unknown 621 /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this 622 /// does not necessarily apply to truncate instructions. e.g. on x86-64, 623 /// all instructions that define 32-bit values implicit zero-extend the 624 /// result out to 64 bits. 625 virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const; 626 virtual bool isZExtFree(EVT VT1, EVT VT2) const; 627 628 /// isNarrowingProfitable - Return true if it's profitable to narrow 629 /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow 630 /// from i32 to i8 but not from i32 to i16. 631 virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const; 632 633 /// isFPImmLegal - Returns true if the target can instruction select the 634 /// specified FP immediate natively. If false, the legalizer will 635 /// materialize the FP immediate as a load from a constant pool. 636 virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; 637 638 /// isShuffleMaskLegal - Targets can use this to indicate that they only 639 /// support *some* VECTOR_SHUFFLE operations, those with specific masks. 640 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask 641 /// values are assumed to be legal. 
642 virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask, 643 EVT VT) const; 644 645 /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is 646 /// used by Targets can use this to indicate if there is a suitable 647 /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant 648 /// pool entry. 649 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask, 650 EVT VT) const; 651 652 /// ShouldShrinkFPConstant - If true, then instruction selection should 653 /// seek to shrink the FP constant of the specified type to a smaller type 654 /// in order to save space and / or reduce runtime. 655 virtual bool ShouldShrinkFPConstant(EVT VT) const { 656 // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more 657 // expensive than a straight movsd. On the other hand, it's important to 658 // shrink long double fp constant since fldt is very slow. 659 return !X86ScalarSSEf64 || VT == MVT::f80; 660 } 661 662 const X86Subtarget* getSubtarget() const { 663 return Subtarget; 664 } 665 666 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is 667 /// computed in an SSE register, not on the X87 floating point stack. 668 bool isScalarFPTypeInSSEReg(EVT VT) const { 669 return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2 670 (VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1 671 } 672 673 /// createFastISel - This method returns a target specific FastISel object, 674 /// or null if the target does not support "fast" ISel. 675 virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const; 676 677 /// getFunctionAlignment - Return the Log2 alignment of this function. 
678 virtual unsigned getFunctionAlignment(const Function *F) const; 679 680 unsigned getRegPressureLimit(const TargetRegisterClass *RC, 681 MachineFunction &MF) const; 682 683 /// getStackCookieLocation - Return true if the target stores stack 684 /// protector cookies at a fixed offset in some non-standard address 685 /// space, and populates the address space and offset as 686 /// appropriate. 687 virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const; 688 689 protected: 690 std::pair<const TargetRegisterClass*, uint8_t> 691 findRepresentativeClass(EVT VT) const; 692 693 private: 694 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can 695 /// make the right decision when generating code for different targets. 696 const X86Subtarget *Subtarget; 697 const X86RegisterInfo *RegInfo; 698 const TargetData *TD; 699 700 /// X86StackPtr - X86 physical register used as stack ptr. 701 unsigned X86StackPtr; 702 703 /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 704 /// floating point ops. 705 /// When SSE is available, use it for f32 operations. 706 /// When SSE2 is available, use it for f64 operations. 707 bool X86ScalarSSEf32; 708 bool X86ScalarSSEf64; 709 710 /// LegalFPImmediates - A list of legal fp immediates. 711 std::vector<APFloat> LegalFPImmediates; 712 713 /// addLegalFPImmediate - Indicate that this x86 target can instruction 714 /// select the specified FP immediate natively. 
715 void addLegalFPImmediate(const APFloat& Imm) { 716 LegalFPImmediates.push_back(Imm); 717 } 718 719 SDValue LowerCallResult(SDValue Chain, SDValue InFlag, 720 CallingConv::ID CallConv, bool isVarArg, 721 const SmallVectorImpl<ISD::InputArg> &Ins, 722 DebugLoc dl, SelectionDAG &DAG, 723 SmallVectorImpl<SDValue> &InVals) const; 724 SDValue LowerMemArgument(SDValue Chain, 725 CallingConv::ID CallConv, 726 const SmallVectorImpl<ISD::InputArg> &ArgInfo, 727 DebugLoc dl, SelectionDAG &DAG, 728 const CCValAssign &VA, MachineFrameInfo *MFI, 729 unsigned i) const; 730 SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg, 731 DebugLoc dl, SelectionDAG &DAG, 732 const CCValAssign &VA, 733 ISD::ArgFlagsTy Flags) const; 734 735 // Call lowering helpers. 736 737 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 738 /// for tail call optimization. Targets which want to do tail call 739 /// optimization should implement this function. 740 bool IsEligibleForTailCallOptimization(SDValue Callee, 741 CallingConv::ID CalleeCC, 742 bool isVarArg, 743 bool isCalleeStructRet, 744 bool isCallerStructRet, 745 const SmallVectorImpl<ISD::OutputArg> &Outs, 746 const SmallVectorImpl<SDValue> &OutVals, 747 const SmallVectorImpl<ISD::InputArg> &Ins, 748 SelectionDAG& DAG) const; 749 bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const; 750 SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, 751 SDValue Chain, bool IsTailCall, bool Is64Bit, 752 int FPDiff, DebugLoc dl) const; 753 754 unsigned GetAlignedArgumentStackSize(unsigned StackSize, 755 SelectionDAG &DAG) const; 756 757 std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, 758 bool isSigned) const; 759 760 SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, 761 SelectionDAG &DAG) const; 762 SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; 763 SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; 764 
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; 765 SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; 766 SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const; 767 SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; 768 SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const; 769 SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const; 770 SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const; 771 SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const; 772 SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; 773 SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; 774 SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, 775 int64_t Offset, SelectionDAG &DAG) const; 776 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; 777 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; 778 SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const; 779 SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; 780 SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot, 781 SelectionDAG &DAG) const; 782 SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const; 783 SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; 784 SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; 785 SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const; 786 SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const; 787 SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const; 788 SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const; 789 SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const; 790 SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const; 791 SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; 792 SDValue LowerToBT(SDValue And, ISD::CondCode CC, 793 DebugLoc dl, SelectionDAG &DAG) const; 794 SDValue LowerSETCC(SDValue Op, 
SelectionDAG &DAG) const; 795 SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const; 796 SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; 797 SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; 798 SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const; 799 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; 800 SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; 801 SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; 802 SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const; 803 SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const; 804 SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; 805 SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; 806 SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; 807 SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const; 808 SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const; 809 SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; 810 SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; 811 SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const; 812 SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const; 813 SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const; 814 SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const; 815 SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const; 816 817 SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const; 818 SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const; 819 SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const; 820 SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const; 821 822 // Utility functions to help LowerVECTOR_SHUFFLE 823 SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const; 824 825 virtual SDValue 826 LowerFormalArguments(SDValue Chain, 827 CallingConv::ID CallConv, bool isVarArg, 828 const SmallVectorImpl<ISD::InputArg> &Ins, 829 DebugLoc dl, SelectionDAG &DAG, 830 
SmallVectorImpl<SDValue> &InVals) const; 831 virtual SDValue 832 LowerCall(SDValue Chain, SDValue Callee, 833 CallingConv::ID CallConv, bool isVarArg, bool &isTailCall, 834 const SmallVectorImpl<ISD::OutputArg> &Outs, 835 const SmallVectorImpl<SDValue> &OutVals, 836 const SmallVectorImpl<ISD::InputArg> &Ins, 837 DebugLoc dl, SelectionDAG &DAG, 838 SmallVectorImpl<SDValue> &InVals) const; 839 840 virtual SDValue 841 LowerReturn(SDValue Chain, 842 CallingConv::ID CallConv, bool isVarArg, 843 const SmallVectorImpl<ISD::OutputArg> &Outs, 844 const SmallVectorImpl<SDValue> &OutVals, 845 DebugLoc dl, SelectionDAG &DAG) const; 846 847 virtual bool isUsedByReturnOnly(SDNode *N) const; 848 849 virtual bool 850 CanLowerReturn(CallingConv::ID CallConv, bool isVarArg, 851 const SmallVectorImpl<ISD::OutputArg> &Outs, 852 LLVMContext &Context) const; 853 854 void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results, 855 SelectionDAG &DAG, unsigned NewOp) const; 856 857 /// Utility function to emit string processing sse4.2 instructions 858 /// that return in xmm0. 859 /// This takes the instruction to expand, the associated machine basic 860 /// block, the number of args, and whether or not the second arg is 861 /// in memory or not. 862 MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB, 863 unsigned argNum, bool inMem) const; 864 865 /// Utility functions to emit monitor and mwait instructions. These 866 /// need to make sure that the arguments to the intrinsic are in the 867 /// correct registers. 868 MachineBasicBlock *EmitMonitor(MachineInstr *MI, 869 MachineBasicBlock *BB) const; 870 MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const; 871 872 /// Utility function to emit atomic bitwise operations (and, or, xor). 873 /// It takes the bitwise instruction to expand, the associated machine basic 874 /// block, and the associated X86 opcodes for reg/reg and reg/imm. 
875 MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter( 876 MachineInstr *BInstr, 877 MachineBasicBlock *BB, 878 unsigned regOpc, 879 unsigned immOpc, 880 unsigned loadOpc, 881 unsigned cxchgOpc, 882 unsigned notOpc, 883 unsigned EAXreg, 884 TargetRegisterClass *RC, 885 bool invSrc = false) const; 886 887 MachineBasicBlock *EmitAtomicBit6432WithCustomInserter( 888 MachineInstr *BInstr, 889 MachineBasicBlock *BB, 890 unsigned regOpcL, 891 unsigned regOpcH, 892 unsigned immOpcL, 893 unsigned immOpcH, 894 bool invSrc = false) const; 895 896 /// Utility function to emit atomic min and max. It takes the min/max 897 /// instruction to expand, the associated basic block, and the associated 898 /// cmov opcode for moving the min or max value. 899 MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr, 900 MachineBasicBlock *BB, 901 unsigned cmovOpc) const; 902 903 // Utility function to emit the low-level va_arg code for X86-64. 904 MachineBasicBlock *EmitVAARG64WithCustomInserter( 905 MachineInstr *MI, 906 MachineBasicBlock *MBB) const; 907 908 /// Utility function to emit the xmm reg save portion of va_start. 909 MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter( 910 MachineInstr *BInstr, 911 MachineBasicBlock *BB) const; 912 913 MachineBasicBlock *EmitLoweredSelect(MachineInstr *I, 914 MachineBasicBlock *BB) const; 915 916 MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI, 917 MachineBasicBlock *BB) const; 918 919 MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI, 920 MachineBasicBlock *BB) const; 921 922 MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI, 923 MachineBasicBlock *BB) const; 924 925 /// Emit nodes that will be selected as "test Op0,Op0", or something 926 /// equivalent, for use with the given x86 condition code. 
927 SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const; 928 929 /// Emit nodes that will be selected as "cmp Op0,Op1", or something 930 /// equivalent, for use with the given x86 condition code. 931 SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, 932 SelectionDAG &DAG) const; 933 }; 934 935 namespace X86 { 936 FastISel *createFastISel(FunctionLoweringInfo &funcInfo); 937 } 938} 939 940#endif // X86ISELLOWERING_H 941