//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValues instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;             // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

// Include the pieces autogenerated from the target description.
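// The generated file supplies the TableGen-driven matcher table together with
// the SelectCode() entry point; anything not handled by the hand-written
// cases in Select() below falls through to that table-based matcher.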
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectGather(SDNode *N, unsigned Opc);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectMOV64Imm32(SDValue N, SDValue &Imm);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex,
                                    getTargetLowering()->getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return getTargetMachine().getInstrInfo();
    }

    /// \brief Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const {
      return true;
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // If the block also has an access to a second TLS address, this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
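/// Placing the load immediately before the call leaves nothing between the
/// two on the chain, which is what later allows isel to fold the load into
/// the call instruction as a memory operand.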
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a
  // store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register-indirect calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be store
    // and load to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between the
    // call expansion and the node legalization. As such this pass basically
    // does "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(getTargetLowering());
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
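    // The store/load pair below round-trips the value through the MemVT-sized
    // stack slot: a truncating store followed by an extending load back out,
    // which is how the FP-stack <-> SSE transfer is actually materialized.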
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
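  // So an address containing (load gs:0) can fold the load away by using the
  // GS segment directly, e.g. (load gs:0) + x is selected as simply gs:x.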
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
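  // In these modes the symbol can be encoded directly as the 32-bit
  // displacement of the memory operand, e.g. "movl sym(%rax), %ecx".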
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & C2" to "((X >> 8) & 0xff) << C1" if safe. This
// allows us to convert the shift and the and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
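// For example, with C1 == 2 the address expression (x >> 6) & 0x3fc is
// rewritten as ((x >> 8) & 0xff) << 2; the inner (x >> 8) & 0xff can then be
// selected as an h-register extract (e.g. movzbl %ah, %ecx) and the << 2
// becomes an addressing-mode scale of 4.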
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides, the
  // isel mechanism requires their node IDs to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale.
// Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
    APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
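    // Replacing the any_extend with a zero_extend is what makes dropping the
    // mask safe: the extended bits are now guaranteed zero rather than
    // undefined, matching what the KnownZero check above established.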
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
    dbgs() << "MatchAddress: ";
    AM.dump();
  });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol addresses currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
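        // For instance, an address of (shl (add x, 8), 2) is matched as
        // index = x, scale = 4, disp = 32.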
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
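    // Keep a copy of AM first, so the partial match can be undone if one of
    // the checks below rejects the transformation.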
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns, by reference, the operands which make up the maximal
/// addressing mode it can match.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes contains all the nodes that have an "addr:$ptr"
      // operand that are not a MemSDNode, and thus don't have proper addrspace
      // info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
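    // Address spaces 256 and 257 are how the front end requests a segment
    // override, e.g. an IR load through an "i32 addrspace(256)*" pointer
    // becomes a GS-relative access here.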
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}


bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if ((uint32_t)ImmVal != (uint64_t)ImmVal)
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a
  // label into a register with 'movl'. TableGen has already made sure we're
  // looking at a label of some kind.
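  // (A 32-bit movl implicitly zero-extends its result to 64 bits, which is
  // why the symbol must be known to fit in 32 bits for this to be valid.)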
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  if (N->getOpcode() != ISD::TargetConstantPool &&
      N->getOpcode() != ISD::TargetJumpTable &&
      N->getOpcode() != ISD::TargetGlobalAddress &&
      N->getOpcode() != ISD::TargetExternalSymbol &&
      N->getOpcode() != ISD::TargetBlockAddress)
    return false;

  Imm = N;
  return TM.getCodeModel() == CodeModel::Small;
}

bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  SDLoc DL(N);
  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(N)) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                    0);
  }

  return true;
}

/// SelectLEAAddr - Calls SelectAddr and determines whether the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getSimpleValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // into an LEA. This is determined with some experimentation but is by no
  // means optimal (especially for code size consideration). LEA is nice
  // because of its three-address nature. Tweak the cost function again when
  // we can run convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
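    // A RIP-relative lea is a single instruction; computing the same address
    // any other way would first have to materialize the symbol in a register.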
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
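/// (On 32-bit PIC targets the initialization is typically a call/pop sequence
/// in the entry block that materializes the PC into the base register.)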
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of the global
/// base register. Output instructions required to initialize the global base
/// register, if necessary.
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg,
                             getTargetLowering()->getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);

  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain };
  SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node),
                                           MVT::i32, MVT::i32, MVT::Other,
                                           Ops);
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
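// Operand layout sketch for the 64-bit atomic pseudos selected above
// (following the usual split-i64 convention on 32-bit targets): the value
// operand arrives as two i32 halves, and the selected node carries the full
// five-operand address plus both halves:
//
//   (ATOMADD64_DAG chain, ptr, val.lo, val.hi)
//     ==> ATOMADD6432 base, scale, index, disp, segment,
//                     val.lo, val.hi, chain     // results: lo, hi, chain
//
// The ATOMxxx6432 pseudos are later expanded by the target into a CMPXCHG8B
// loop, which is why they are selected by hand here rather than matched by
// a tablegen pattern.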
/// Atomic opcode table, indexed by [AtomicOpc][AtomicSz].
///
enum AtomicOpc {
  ADD,
  SUB,
  INC,
  DEC,
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_ADD8mi,
    X86::LOCK_ADD8mr,
    X86::LOCK_ADD16mi8,
    X86::LOCK_ADD16mi,
    X86::LOCK_ADD16mr,
    X86::LOCK_ADD32mi8,
    X86::LOCK_ADD32mi,
    X86::LOCK_ADD32mr,
    X86::LOCK_ADD64mi8,
    X86::LOCK_ADD64mi32,
    X86::LOCK_ADD64mr,
  },
  {
    X86::LOCK_SUB8mi,
    X86::LOCK_SUB8mr,
    X86::LOCK_SUB16mi8,
    X86::LOCK_SUB16mi,
    X86::LOCK_SUB16mr,
    X86::LOCK_SUB32mi8,
    X86::LOCK_SUB32mi,
    X86::LOCK_SUB32mr,
    X86::LOCK_SUB64mi8,
    X86::LOCK_SUB64mi32,
    X86::LOCK_SUB64mr,
  },
  {
    0,
    X86::LOCK_INC8m,
    0,
    0,
    X86::LOCK_INC16m,
    0,
    0,
    X86::LOCK_INC32m,
    0,
    0,
    X86::LOCK_INC64m,
  },
  {
    0,
    X86::LOCK_DEC8m,
    0,
    0,
    X86::LOCK_DEC16m,
    0,
    0,
    X86::LOCK_DEC32m,
    0,
    0,
    X86::LOCK_DEC64m,
  },
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr,
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr,
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr,
  }
};

// Return the target constant operand for atomic-load-op and do simple
// translations, such as from atomic-load-add to lock-sub. The return value is
// one of the following 3 cases:
// + target-constant, the operand could be supported as a target constant.
// + empty, the operand is not needed any more with the new op selected.
// + non-empty, otherwise.
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
                                                SDLoc dl,
                                                enum AtomicOpc &Op, MVT NVT,
                                                SDValue Val) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
    int64_t CNVal = CN->getSExtValue();
    // Quit if not 32-bit imm.
    if ((int32_t)CNVal != CNVal)
      return Val;
    // For atomic-load-add, we could do some optimizations.
    if (Op == ADD) {
      // Translate to INC/DEC if ADD by 1 or -1.
      if ((CNVal == 1) || (CNVal == -1)) {
        Op = (CNVal == 1) ? INC : DEC;
        // No more constant operand after being translated into INC/DEC.
        return SDValue();
      }
      // Translate to SUB if ADD by negative value.
      if (CNVal < 0) {
        Op = SUB;
        CNVal = -CNVal;
      }
    }
    return CurDAG->getTargetConstant(CNVal, NVT);
  }

  // If the value operand is single-used, try to optimize it.
  if (Op == ADD && Val.hasOneUse()) {
    // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
    if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
      Op = SUB;
      return Val.getOperand(1);
    }
    // A special case for i16, which needs truncating as, in most cases, it's
    // promoted to i32. We will translate
    // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)).
    if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
        Val.getOperand(0).getOpcode() == ISD::SUB &&
        X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
      Op = SUB;
      Val = Val.getOperand(0);
      return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
                                            Val.getOperand(1));
    }
  }

  return Val;
}
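// Illustrative translations performed by getAtomicLoadArithTargetConstant
// (operand values invented for the example):
//
//   atomic-load-add ptr, 1        -> LOCK_INC*m  ptr         (no constant)
//   atomic-load-add ptr, -16      -> LOCK_SUB*mi ptr, 16
//   atomic-load-add ptr, (0 - x)  -> LOCK_SUB*mr ptr, x
//
// Normalizing the constant this way lets the table above pick the
// imm8-encoded LOCK forms whenever the value fits a sign-extended 8-bit
// immediate.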
SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  SDLoc dl(Node);

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Which row of AtomicOpcTbl to index into.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
    default:
      return 0;
    case ISD::ATOMIC_LOAD_OR:
      Op = OR;
      break;
    case ISD::ATOMIC_LOAD_AND:
      Op = AND;
      break;
    case ISD::ATOMIC_LOAD_XOR:
      Op = XOR;
      break;
    case ISD::ATOMIC_LOAD_ADD:
      Op = ADD;
      break;
  }

  Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
  bool isUnOp = !Val.getNode();
  bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);

  unsigned Opc = 0;
  switch (NVT.SimpleTy) {
    default: return 0;
    case MVT::i8:
      if (isCN)
        Opc = AtomicOpcTbl[Op][ConstantI8];
      else
        Opc = AtomicOpcTbl[Op][I8];
      break;
    case MVT::i16:
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI16];
        else
          Opc = AtomicOpcTbl[Op][ConstantI16];
      } else
        Opc = AtomicOpcTbl[Op][I16];
      break;
    case MVT::i32:
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI32];
        else
          Opc = AtomicOpcTbl[Op][ConstantI32];
      } else
        Opc = AtomicOpcTbl[Op][I32];
      break;
    case MVT::i64:
      Opc = AtomicOpcTbl[Op][I64];
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = AtomicOpcTbl[Op][SextConstantI64];
        else if (i64immSExt32(Val.getNode()))
          Opc = AtomicOpcTbl[Op][ConstantI64];
      }
      break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  SDValue Ret;
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isUnOp) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  }
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}
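// Why the value result becomes IMPLICIT_DEF: the check at the top guarantees
// the atomic's loaded value is dead, so instead of, say, LOCK XADD (which
// would materialize the old value) we can emit the cheaper memory-destination
// RMW form. Sketch:
//
//   __sync_fetch_and_add(&x, 5);          // result unused
//     before: t = AtomicLoadAdd ptr, 5    // (value, chain)
//     after:  LOCK_ADD32mi8 [mem], 5      // chain only; value = IMPLICIT_DEF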
/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}
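// Example of why this predicate matters (hand-written, for illustration):
// a user such as JA only reads CF and ZF, so narrowing the compare is fine,
// while JG reads SF and OF:
//
//   testl $0x80, %eax ; ja ...   ==> testb $0x80, %al ; ja ...    (safe)
//   testl $0x80, %eax ; jg ...   ==> no narrowing: testb would compute SF
//                                    from bit 7 instead of bit 31.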
/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for the {load; increment or decrement; store} memory-modify
/// transformation.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode* &LoadNode, SDValue &InputChain) {

  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is the store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        continue;
      }

      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have smaller node id than the load.
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }

      ChainOps.push_back(Op);
    }

    if (ChainCheck)
      // Make a new TokenFactor with all the other input chains except
      // for the load.
      InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                   MVT::Other, &ChainOps[0], ChainOps.size());
  }
  if (!ChainCheck)
    return false;

  return true;
}

/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
  if (Opc == X86ISD::DEC) {
    if (LdVT == MVT::i64) return X86::DEC64m;
    if (LdVT == MVT::i32) return X86::DEC32m;
    if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
    assert(Opc == X86ISD::INC && "unrecognized opcode");
    if (LdVT == MVT::i64) return X86::INC64m;
    if (LdVT == MVT::i32) return X86::INC32m;
    if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}
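// DAG shape this transformation targets (sketch; node names invented):
//
//   t1 = load [addr]                    ==>    INC32m [addr]
//   t2 = X86ISD::INC t1                        (one read-modify-write instr)
//   store t2, [addr]
//
// The chain bookkeeping in isLoadIncOrDecStore exists because the store's
// chain may be a TokenFactor; the load's chain edge must be peeled out of it
// so the fused instruction can take the load's input chain directly without
// creating a cycle.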
/// SelectGather - Customized ISel for GATHER operations.
///
SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
  // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
  SDValue Chain = Node->getOperand(0);
  SDValue VSrc = Node->getOperand(2);
  SDValue Base = Node->getOperand(3);
  SDValue VIdx = Node->getOperand(4);
  SDValue VMask = Node->getOperand(5);
  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
  if (!Scale)
    return 0;

  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
                                   MVT::Other);

  // Memory Operands: Base, Scale, Index, Disp, Segment
  SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
  SDValue Segment = CurDAG->getRegister(0, MVT::i32);
  const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
                          Disp, Segment, VMask, Chain };
  SDNode *ResNode = CurDAG->getMachineNode(Opc, SDLoc(Node), VTs, Ops);
  // Node has 2 outputs: VDst and MVT::Other.
  // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
  // We replace VDst of Node with VDst of ResNode, and Other of Node with
  // Other of ResNode.
  ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
  return ResNode;
}
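// Semantics sketch of a selected gather (AVX2, AT&T syntax, registers
// illustrative):
//
//   vpgatherdd %ymm2, (%rdi,%ymm1,4), %ymm0
//
// loads one 32-bit element per lane whose mask bit is set, merges unselected
// lanes from the source operand, and clears the mask register as lanes
// complete. That updated mask (VMask_wb above, result 1 of ResNode) has no
// counterpart in the intrinsic node, which is why it is skipped when the
// results are rewired.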
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  SDLoc dl(Node);

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    Node->setNodeId(-1);
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_avx2_gather_d_pd:
    case Intrinsic::x86_avx2_gather_d_pd_256:
    case Intrinsic::x86_avx2_gather_q_pd:
    case Intrinsic::x86_avx2_gather_q_pd_256:
    case Intrinsic::x86_avx2_gather_d_ps:
    case Intrinsic::x86_avx2_gather_d_ps_256:
    case Intrinsic::x86_avx2_gather_q_ps:
    case Intrinsic::x86_avx2_gather_q_ps_256:
    case Intrinsic::x86_avx2_gather_d_q:
    case Intrinsic::x86_avx2_gather_d_q_256:
    case Intrinsic::x86_avx2_gather_q_q:
    case Intrinsic::x86_avx2_gather_q_q_256:
    case Intrinsic::x86_avx2_gather_d_d:
    case Intrinsic::x86_avx2_gather_d_d_256:
    case Intrinsic::x86_avx2_gather_q_d:
    case Intrinsic::x86_avx2_gather_q_d_256: {
      if (!Subtarget->hasAVX2())
        break;
      unsigned Opc;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
      case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
      case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
      case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
      case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
      case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
      case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
      case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
      case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
      case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
      case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
      case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
      case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
      case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
      case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
      case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
      }
      SDNode *RetVal = SelectGather(Node, Opc);
      if (RetVal)
        // We already called ReplaceUses inside SelectGather.
        return NULL;
      break;
    }
    }
    break;
  }
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
  case X86ISD::ATOMXOR64_DAG:
  case X86ISD::ATOMADD64_DAG:
  case X86ISD::ATOMSUB64_DAG:
  case X86ISD::ATOMNAND64_DAG:
  case X86ISD::ATOMAND64_DAG:
  case X86ISD::ATOMMAX64_DAG:
  case X86ISD::ATOMMIN64_DAG:
  case X86ISD::ATOMUMAX64_DAG:
  case X86ISD::ATOMUMIN64_DAG:
  case X86ISD::ATOMSWAP64_DAG: {
    unsigned Opc;
    switch (Opcode) {
    default: llvm_unreachable("Impossible opcode");
    case X86ISD::ATOMOR64_DAG:   Opc = X86::ATOMOR6432;   break;
    case X86ISD::ATOMXOR64_DAG:  Opc = X86::ATOMXOR6432;  break;
    case X86ISD::ATOMADD64_DAG:  Opc = X86::ATOMADD6432;  break;
    case X86ISD::ATOMSUB64_DAG:  Opc = X86::ATOMSUB6432;  break;
    case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
    case X86ISD::ATOMAND64_DAG:  Opc = X86::ATOMAND6432;  break;
    case X86ISD::ATOMMAX64_DAG:  Opc = X86::ATOMMAX6432;  break;
    case X86ISD::ATOMMIN64_DAG:  Opc = X86::ATOMMIN6432;  break;
    case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break;
    case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break;
    case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
    }
    SDNode *RetVal = SelectAtomic64(Node, Opc);
    if (RetVal)
      return RetVal;
    break;
  }

  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
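  // The next case shrinks the immediate encoding of (x << C1) op C2. A worked
  // example (values invented for illustration), for an i32 OR:
  //
  //   (or (shl x, 8), 0x1200)        imm 0x1200 needs a 32-bit field
  //     => (shl (or x, 0x12), 8)     imm 0x12 fits the imm8 form OR32ri8
  //
  // The masked-bits check below keeps this sound: for OR/XOR the constant may
  // not set bits below the shift amount, or the two forms would differ.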
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
      break;

    unsigned ShlOp, Op;
    MVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),
                                         NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }
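  // Register convention for the one-operand MUL selected above (sketch for
  // the i32 case): the left operand is pinned to EAX by a glued CopyToReg,
  // so the emitted sequence is roughly
  //
  //   movl x, %eax ; mull y        // EDX:EAX = EAX * y
  //
  // and the machine node's results map in order to the implicit defs:
  // result 0 the low half, result 1 the high half, result 2 EFLAGS,
  // matching the (NVT, NVT, i32) value list.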
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
                     MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
      case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL8r:
    case X86::MUL8r:
      SrcReg = LoReg = X86::AL; HiReg = X86::AH;
      break;
    case X86::IMUL16r:
    case X86::MUL16r:
      SrcReg = LoReg = X86::AX; HiReg = X86::DX;
      break;
    case X86::IMUL32r:
    case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
      break;
    case X86::IMUL64r:
    case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
      break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0;
      break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
        InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
    } else {
      SDValue Ops[] = { N1, InFlag };
      if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        InFlag = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        InFlag = SDValue(CNode, 0);
      }
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                  CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      if (ResLo.getNode() == 0) {
        assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
                                       InFlag);
        InFlag = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      if (ResHi.getNode() == 0) {
        assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
                                       InFlag);
        InFlag = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
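  // Why MULX is preferred when BMI2 is available (illustrative, AT&T
  // syntax): MULX takes its implicit operand in EDX/RDX, writes two explicit
  // destinations, and leaves EFLAGS untouched, e.g.
  //
  //   mulxl %ecx, %eax, %ebx       // ebx:eax = edx * ecx, flags preserved
  //
  // so the selector models it with explicit (hi, lo) results instead of
  // flag-glued copies out of fixed registers.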
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),
                  0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),
                  0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, MVT::i32), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GPR8_NOREX registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                  CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }
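  // Typical sequence selected by the DIVREM case above (signed i32 shown,
  // registers illustrative):
  //
  //   movl x, %eax
  //   cltd                 // CDQ: sign-extend EAX into EDX
  //   idivl y              // EAX = quotient, EDX = remainder
  //
  // For unsigned division (or when the sign bit is known zero) the CDQ is
  // replaced by zeroing the high register with MOV32r0.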
  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform comparison.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getSimpleValueType().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC),
                        0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }

      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        const TargetRegisterClass *TRC;
        switch (N0.getSimpleValueType().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // forced.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                                 MVT::i32, Subreg, ShiftedImm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }

      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }

      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }
    }
    break;
  }
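  // Immediate-width ladder used by the TEST narrowing above (examples taken
  // from the comments in the code): bits 0-7 -> testb %al; bits 8-15 ->
  // testb %ah (requires an ABCD register for the h-subreg); bits 0-15 ->
  // testw %ax; bits 0-31 -> testl %eax. Each step is gated on
  // HasNoSignedComparisonUses whenever the narrowed immediate would set the
  // new sign bit, since narrowing relocates which bit SF reports.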
  case ISD::STORE: {
    // Change a chain of {load; increment or decrement; store} of the same
    // value into a simple increment or decrement through memory of that
    // value, if the uses of the modified value and its address are suitable.
    //
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)
    //
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, we have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (implicit EFLAGS)]>;
    // but maybe need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (transferrable EFLAGS)]>;

    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = 0;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   SDLoc(Node),
                                                   MVT::i32, MVT::Other, Ops);
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}
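// Usage sketch (for orientation; the actual registration lives in the X86
// target's pass configuration, not in this file): the target machine adds
// the selector to its codegen pipeline roughly as
//
//   addPass(createX86ISelDag(TM, getOptLevel()));
//
// after which the pass rewrites each function's legalized SelectionDAG into
// machine nodes via the Select() routine above.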