X86ISelDAGToDAG.cpp revision 203954
//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

// Force NDEBUG on in any optimized build on Darwin.
//
// FIXME: This is a huge hack, to work around ridiculously awful compile times
// on this file with gcc-4.2 on Darwin, in Release mode.
#if (!defined(__llvm__) && defined(__APPLE__) && \
     defined(__OPTIMIZE__) && !defined(NDEBUG))
#define NDEBUG
#endif

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDValue Reg;
      int FrameIndex;
    } Base;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    GlobalValue *GV;
    Constant *CP;
    BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
    }
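    // For illustration, a machine address such as
    //   movl 4(%ebx,%ecx,8), %eax
    // decomposes into these fields as Base.Reg = EBX, IndexReg = ECX,
    // Scale = 8, Disp = 4, with no segment override.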

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base.Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base.Reg ";
      if (Base.Reg.getNode() != 0)
        Base.Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base.FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    /// InstructionSelect - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelect();

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root) const;
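    // Folding here means selecting a load together with its user as one
    // memory-operand instruction; e.g. a DAG like (add x, (load p)) can
    // become a single 'addl (p), x' instead of a separate mov plus add.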

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);

    bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectScalarSSELoad(SDNode *Op, SDValue Pred,
                             SDValue N, SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &InChain, SDValue &OutChain);
    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
    void PreprocessForRMW();
    void PreprocessForFPConvert();

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
        AM.Base.Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else if (AM.BlockAddr)
        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                       true, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDValue getI16Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register.
    /// Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }

#ifndef NDEBUG
    unsigned Indent;
#endif
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}


bool X86DAGToDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  // Proceed to 'generic' cycle finder code
  return SelectionDAGISel::IsLegalToFold(N, U, Root);
}

/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  SDValue NewTF = CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  SDValue NewLoad = CurDAG->UpdateNodeOperands(Load, NewTF,
                                               Load.getOperand(1),
                                               Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, NewLoad.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}

/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG. The
/// chain produced by the load must only be used by the store's chain operand,
/// otherwise this may produce a cycle in the DAG.
///
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT) {
    if (!N.hasOneUse())
      return false;
    N = N.getOperand(0);
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;

  if (N.hasOneUse() &&
      LD->hasNUsesOfValue(1, 1) &&
      N.getOperand(1) == Address &&
      LD->isOperandOf(Chain.getNode())) {
    Load = N;
    return true;
  }
  return false;
}

/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue CallSeqStart) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = CallSeqStart.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected CallSeqStart chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
    Ops.push_back(CallSeqStart.getOperand(i));
  CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}
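// For example, moving the callee load past CALLSEQ_START is what lets
//   movq  fn_ptr(%rip), %rax
//   callq *%rax
// be selected as a single 'callq *fn_ptr(%rip)'.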
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}


/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -O0 mode.
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact that the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///       |     \-
///       |       |
///       |     [Op]
///       |       ^
///       |       |
///       \      /
///        \    /
///       [Store]
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///    [Load chain]
      ///        ^
      ///        |
      ///      [Load]
      ///      ^    ^
      ///      |    |
      ///     /      \--
      ///    /          |
      ///[CALLSEQ_START] |
      ///    ^           |
      ///    |           |
      /// [LOAD/C2Reg]   |
      ///    |           |
      ///     \         /
      ///      \       /
      ///       [CALL]
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      checkForCycles(I);
    }
  }
}
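// Once the chains line up, a pattern like (store (add (load p), 1), p) can
// be selected as the single read-modify-write instruction 'addl $1, (p)'
// rather than a load, an add, and a store.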

/// PreprocessForFPConvert - Walk over the dag lowering fpround and fpextend
/// nodes that target the FP stack to be store and load pairs to the stack.
/// This is a gross hack. We would like to simply mark these as being illegal,
/// but when we do that, legalize produces these when it expands calls, then
/// expands these in the same legalize pass. We would like dag combine to be
/// able to hack on these between the call expansion and the node
/// legalization. As such this pass basically does "really late" legalization
/// of these inline with the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  const Function *F = MF->getFunction();
  OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);

  if (OptLevel != CodeGenOpt::None)
    PreprocessForRMW();

  // FIXME: This should only happen when not compiled with -O0.
  PreprocessForFPConvert();

  // Codegen the basic block.
#ifndef NDEBUG
  DEBUG(dbgs() << "===== Instruction selection begins:\n");
  Indent = 0;
#endif
  SelectRoot(*CurDAG);
#ifndef NDEBUG
  DEBUG(dbgs() << "===== Instruction selection ends:\n");
#endif

  CurDAG->RemoveDeadNodes();
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, DebugLoc::getUnknownLoc(),
            TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}


bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}

bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf

  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      int64_t Offset = AM.Disp + G->getOffset();
      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.GV = G->getGlobal();
      AM.Disp = Offset;
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      int64_t Offset = AM.Disp + CP->getOffset();
      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp = Offset;
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0) {
    AM.Base.Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base.Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
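// The first rewrite pays off because an index with no base forces a full
// 32-bit displacement in the SIB encoding; e.g. 'leal (,%eax,2), %ecx' is
// four bytes longer than the equivalent 'leal (%eax,%eax), %ecx'.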
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  CodeModel::Model M = TM.getCodeModel();

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
      int64_t Val = AM.Disp + Cst->getSExtValue();
      if (X86::isOffsetSuitableForCodeModel(Val, M,
                                            AM.hasSymbolicDisplacement())) {
        AM.Disp = Val;
        return false;
      }
    }
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit ||
        X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
                                          AM.hasSymbolicDisplacement())) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.getNode()->getOpcode() == ISD::ADD &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
          if (!is64Bit ||
              X86::isOffsetSuitableForCodeModel(Disp, M,
                                                AM.hasSymbolicDisplacement()))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
      break;
    }
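    // The multiply cases below map x*3, x*5, and x*9 onto base+scaled-index
    // forms, so e.g. 'imull $5, %eax' can instead be selected as
    // 'leal (%eax,%eax,4), %eax'.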
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit ||
                X86::isOffsetSuitableForCodeModel(Disp, M,
                                                  AM.hasSymbolicDisplacement()))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }
    int Cost = 0;
    SDValue RHS = N.getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base.Reg.getNode() &&
         !AM.Base.Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }

  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1) &&
        !MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base.Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      uint64_t Offset = CN->getSExtValue();
      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit ||
           X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
                                             AM.hasSymbolicDisplacement())) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    SDValue Shift = N.getOperand(0);
    if (Shift.getNumOperands() != 2) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue X = Shift.getOperand(0);
    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff)" if safe. This
    // allows us to convert the shift and and into an h-register extract and
    // a scaled index.
    if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
      unsigned ScaleLog = 8 - C1->getZExtValue();
      if (ScaleLog > 0 && ScaleLog < 4 &&
          C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
        SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
        SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                      X, Eight);
        SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
                                      Srl, Mask);
        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
        SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                      And, ShlCount);

        // Insert the new nodes into the topological ordering.
        if (Eight.getNode()->getNodeId() == -1 ||
            Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Eight.getNode());
          Eight.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Mask.getNode()->getNodeId() == -1 ||
            Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Mask.getNode());
          Mask.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Srl.getNode()->getNodeId() == -1 ||
            Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
          CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
          Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
        }
        if (And.getNode()->getNodeId() == -1 ||
            And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), And.getNode());
          And.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (ShlCount.getNode()->getNodeId() == -1 ||
            ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
          ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (Shl.getNode()->getNodeId() == -1 ||
            Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), Shl.getNode());
          Shl.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        CurDAG->ReplaceAllUsesWith(N, Shl);
        AM.IndexReg = And;
        AM.Scale = (1 << ScaleLog);
        return false;
      }
    }

    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    if (Shift.getOpcode() != ISD::SHL) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));
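    // For instance, (x << 2) & 0x3FC is rewritten here as (x & 0xFF) << 2,
    // so the new AND can become the index register with Scale = 4.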
    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// SelectAddr - returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
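// Folding a scalar load directly into an SSE operation (e.g. selecting
// 'addss (%rax), %xmm0' rather than a separate movss) is only sound when
// the upper vector elements are undef or known zero; SelectScalarSSELoad
// below checks exactly that.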
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        IsProfitableToFold(N, Pred.getNode(), Op) &&
        IsLegalToFold(N, Pred.getNode(), Op)) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}
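// LEA computes base + index*scale + disp in a single three-operand
// instruction without touching EFLAGS, e.g. 'leal 16(%rdi,%rsi,4), %eax',
// which is why the heuristic below weighs how much of the addressing mode
// is actually populated before committing to it.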
/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp) {
  assert(Op->getOpcode() == X86ISD::TLSADDR);
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      IsProfitableToFold(N, P, P) &&
      IsLegalToFold(N, P, P))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
  return false;
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1.getNode(), In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
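// As a concrete case: a __sync_fetch_and_add(p, 1) whose result is ignored
// comes through here and is emitted as a single 'lock incl (%reg)' rather
// than a cmpxchg loop.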
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use "lock" version of add, sub, inc, dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to machine instruction and it denotes the prefix.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Ptr.getNode(), Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (Predicate_i16immSExt8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (Predicate_i16immSExt8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (Predicate_i32immSExt8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (Predicate_i32immSExt8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (Predicate_i64immSExt8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (Predicate_i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (Predicate_i64immSExt8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (Predicate_i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

#ifndef NDEBUG
  DEBUG({
      dbgs() << std::string(Indent, ' ') << "Selecting: ";
      Node->dump(CurDAG);
      dbgs() << '\n';
    });
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DEBUG({
        dbgs() << std::string(Indent-2, ' ') << "== ";
        Node->dump(CurDAG);
        dbgs() << '\n';
      });
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative, so if the first operand didn't fold, try the
    // second.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
#ifndef NDEBUG
      DEBUG({
          dbgs() << std::string(Indent-2, ' ') << "=> ";
          Result.getNode()->dump(CurDAG);
          dbgs() << '\n';
        });
#endif
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                                Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                MVT::i8, Result);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), Result);
#ifndef NDEBUG
      DEBUG({
          dbgs() << std::string(Indent-2, ' ') << "=> ";
          Result.getNode()->dump(CurDAG);
          dbgs() << '\n';
        });
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
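
  // Note (informal): the widening multiply leaves its product in a fixed
  // register pair; e.g. for i32 UMUL_LOHI the sequence built above is roughly
  //
  //   copy N0 -> EAX;  MUL32r N1         // EDX:EAX = EAX * N1
  //   result 0 <- EAX (low half),  result 1 <- EDX (high half)
  //
  // with the ordering enforced by threading the MVT::Flag value between the
  // copies rather than by chain edges.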

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL; ClrReg = HiReg = X86::AH;
      ClrOpcode = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX; HiReg = X86::DX;
      ClrOpcode = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8: just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX16rm8, dl, MVT::i16,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
#ifndef NDEBUG
      DEBUG({
          dbgs() << std::string(Indent-2, ' ') << "=> ";
          Result.getNode()->dump(CurDAG);
          dbgs() << '\n';
        });
#endif
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                                Result,
                                    CurDAG->getTargetConstant(8, MVT::i8)),
                         0);
        // Then truncate it down to i8.
        Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                MVT::i8, Result);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), Result);
#ifndef NDEBUG
      DEBUG({
          dbgs() << std::string(Indent-2, ' ') << "=> ";
          Result.getNode()->dump(CurDAG);
          dbgs() << '\n';
        });
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
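
  // Note (informal): for i32 UDIVREM the sequence built above is roughly
  //
  //   copy N0 -> EAX;  MOV32r0 -> EDX;  DIV32r N1
  //   quotient <- EAX (result 0),  remainder <- EDX (result 1)
  //
  // while i32 SDIVREM replaces the MOV32r0 with CDQ, which sign-extends EAX
  // into EDX. The i8 forms instead divide AX and split AL/AH.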

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC),
                        0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT_HI,
                                                        dl, MVT::i8, Reg);

        // Emit a testb. No special NOREX tricks are needed since there's
        // only one GPR operand!
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_16BIT, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg,
                                      Imm);
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_32BIT, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg,
                                      Imm);
      }
    }
    break;
  }
  }

  SDNode *ResNode = SelectCode(Node);

#ifndef NDEBUG
  DEBUG({
      dbgs() << std::string(Indent-2, ' ') << "=> ";
      if (ResNode == NULL || ResNode == Node)
        Node->dump(CurDAG);
      else
        ResNode->dump(CurDAG);
      dbgs() << '\n';
    });
  Indent -= 2;
#endif

  return ResNode;
}
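
// Informally: any node the switch above does not handle falls through to
// SelectCode(), the matcher that TableGen generates from the X86 pattern
// (.td) files, while a NULL return from the handwritten cases means the
// node's results were already rewired in place via ReplaceUses.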

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op.getNode(), Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
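
// For instance (a sketch): given inline asm such as
//
//   int x;  asm("incl %0" : "+m"(x));
//
// the 'm' operand reaches this hook, and SelectAddr decomposes it into the
// five x86 memory-operand values (base, scale, index, displacement, segment)
// that are pushed onto OutOps in that order; returning true reports that the
// constraint could not be matched.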

/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}
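
// Typical use (a sketch; the actual registration lives in X86TargetMachine's
// instruction-selector hook, not here):
//
//   PM.add(createX86ISelDag(TM, OptLevel));
//
// after which the selected MachineFunction proceeds to scheduling and the
// rest of the codegen pipeline.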