//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dagcombine"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
using namespace llvm;

// Pass-wide counters reported by -stats.
STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");

namespace {
  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Turn on alias analysis during testing"));

  static cl::opt<bool>
    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
               cl::desc("Include global information in alias analysis"));

//------------------------------ DAGCombiner ---------------------------------//

  /// DAGCombiner - Worklist-driven peephole combiner over a SelectionDAG.
  /// Driven by Run(); each node is folded by the opcode-specific visit*
  /// method, falling back to target-specific combines (see combine()).
  class DAGCombiner {
    SelectionDAG &DAG;
    const TargetLowering &TLI;
    CombineLevel Level;           // Which legalization phase we are running in.
    CodeGenOpt::Level OptLevel;
    bool LegalOperations;         // True once operations have been legalized.
    bool LegalTypes;              // True once types have been legalized.

    // Worklist of all of the nodes that need to be simplified.
    //
    // This has the semantics that when adding to the worklist,
    // the item added must be next to be processed. It should
    // also only appear once. The naive approach to this takes
    // linear time.
    //
    // To reduce the insert/remove time to logarithmic, we use
    // a set and a vector to maintain our worklist.
    //
    // The set contains the items on the worklist, but does not
    // maintain the order they should be visited.
    //
    // The vector maintains the order nodes should be visited, but may
    // contain duplicate or removed nodes.  When choosing a node to
    // visit, we pop off the order stack until we find an item that is
    // also in the contents set. All operations are O(log N).
    SmallPtrSet<SDNode*, 64> WorkListContents;
    SmallVector<SDNode*, 64> WorkListOrder;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis &AA;

    /// AddUsersToWorkList - When an instruction is simplified, add all users of
    /// the instruction to the work lists because they might get more simplified
    /// now.
    ///
    void AddUsersToWorkList(SDNode *N) {
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           UI != UE; ++UI)
        AddToWorkList(*UI);
    }

    /// visit - call the node-specific routine that knows how to fold each
    /// particular type of node.
    SDValue visit(SDNode *N);

  public:
    /// AddToWorkList - Add to the work list making sure its instance is at the
    /// back (next to be processed.)
    void AddToWorkList(SDNode *N) {
      WorkListContents.insert(N);
      WorkListOrder.push_back(N);
    }

    /// removeFromWorkList - remove all instances of N from the worklist.
    /// (Only the contents set is updated; stale entries remaining in the
    /// order vector are skipped when popped.)
    void removeFromWorkList(SDNode *N) {
      WorkListContents.erase(N);
    }

    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                      bool AddTo = true);

    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true) {
      SDValue To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);

  private:

    /// SimplifyDemandedBits - Check the specified integer node value to see if
    /// it can be simplified or if things it uses can be simplified by bit
    /// propagation.  If so, return true.
    bool SimplifyDemandedBits(SDValue Op) {
      unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
      APInt Demanded = APInt::getAllOnesValue(BitWidth);
      return SimplifyDemandedBits(Op, Demanded);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);

    // Integer-promotion helpers: widen narrow operations to a larger legal
    // type when the target prefers it.
    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue PromoteIntBinOp(SDValue Op);
    SDValue PromoteIntShiftOp(SDValue Op);
    SDValue PromoteExtend(SDValue Op);
    bool PromoteLoad(SDValue Op);

    void ExtendSetCCUses(SmallVector<SDNode*, 4> SetCCs,
                         SDValue Trunc, SDValue ExtLoad, DebugLoc DL,
                         ISD::NodeType ExtType);

    /// combine - call the node-specific routine that knows how to fold each
    /// particular type of node. If that doesn't do anything, try the
    /// target-specific DAG combines.
    SDValue combine(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types.  The semantics are as follows:
    // Return Value:
    //   SDValue.getNode() == 0 - No change was made
    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
    //   otherwise              - N should be replaced by the returned Operand.
    //
    SDValue visitTokenFactor(SDNode *N);
    SDValue visitMERGE_VALUES(SDNode *N);
    SDValue visitADD(SDNode *N);
    SDValue visitSUB(SDNode *N);
    SDValue visitADDC(SDNode *N);
    SDValue visitSUBC(SDNode *N);
    SDValue visitADDE(SDNode *N);
    SDValue visitSUBE(SDNode *N);
    SDValue visitMUL(SDNode *N);
    SDValue visitSDIV(SDNode *N);
    SDValue visitUDIV(SDNode *N);
    SDValue visitSREM(SDNode *N);
    SDValue visitUREM(SDNode *N);
    SDValue visitMULHU(SDNode *N);
    SDValue visitMULHS(SDNode *N);
    SDValue visitSMUL_LOHI(SDNode *N);
    SDValue visitUMUL_LOHI(SDNode *N);
    SDValue visitSMULO(SDNode *N);
    SDValue visitUMULO(SDNode *N);
    SDValue visitSDIVREM(SDNode *N);
    SDValue visitUDIVREM(SDNode *N);
    SDValue visitAND(SDNode *N);
    SDValue visitOR(SDNode *N);
    SDValue visitXOR(SDNode *N);
    SDValue SimplifyVBinOp(SDNode *N);
    SDValue SimplifyVUnaryOp(SDNode *N);
    SDValue visitSHL(SDNode *N);
    SDValue visitSRA(SDNode *N);
    SDValue visitSRL(SDNode *N);
    SDValue visitCTLZ(SDNode *N);
    SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTTZ(SDNode *N);
    SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTPOP(SDNode *N);
    SDValue visitSELECT(SDNode *N);
    SDValue visitVSELECT(SDNode *N);
    SDValue visitSELECT_CC(SDNode *N);
    SDValue visitSETCC(SDNode *N);
    SDValue visitSIGN_EXTEND(SDNode *N);
    SDValue visitZERO_EXTEND(SDNode *N);
    SDValue visitANY_EXTEND(SDNode *N);
    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
    SDValue visitTRUNCATE(SDNode *N);
    SDValue visitBITCAST(SDNode *N);
    SDValue visitBUILD_PAIR(SDNode *N);
    SDValue visitFADD(SDNode *N);
    SDValue visitFSUB(SDNode *N);
    SDValue visitFMUL(SDNode *N);
    SDValue visitFMA(SDNode *N);
    SDValue visitFDIV(SDNode *N);
    SDValue visitFREM(SDNode *N);
    SDValue visitFCOPYSIGN(SDNode *N);
    SDValue visitSINT_TO_FP(SDNode *N);
    SDValue visitUINT_TO_FP(SDNode *N);
    SDValue visitFP_TO_SINT(SDNode *N);
    SDValue visitFP_TO_UINT(SDNode *N);
    SDValue visitFP_ROUND(SDNode *N);
    SDValue visitFP_ROUND_INREG(SDNode *N);
    SDValue visitFP_EXTEND(SDNode *N);
    SDValue visitFNEG(SDNode *N);
    SDValue visitFABS(SDNode *N);
    SDValue visitFCEIL(SDNode *N);
    SDValue visitFTRUNC(SDNode *N);
    SDValue visitFFLOOR(SDNode *N);
    SDValue visitBRCOND(SDNode *N);
    SDValue visitBR_CC(SDNode *N);
    SDValue visitLOAD(SDNode *N);
    SDValue visitSTORE(SDNode *N);
    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
    SDValue visitBUILD_VECTOR(SDNode *N);
    SDValue visitCONCAT_VECTORS(SDNode *N);
    SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
    SDValue visitVECTOR_SHUFFLE(SDNode *N);

    SDValue XformToShuffleWithZero(SDNode *N);
    SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);

    SDValue visitShiftByConstant(SDNode *N, unsigned Amt);

    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
    SDValue SimplifySelect(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2);
    SDValue SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2,
                             SDValue N3, ISD::CondCode CC,
                             bool NotExtCompare = false);
    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          DebugLoc DL, bool foldBooleans = true);
    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                       unsigned HiOp);
    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
    SDValue BuildSDIV(SDNode *N);
    SDValue BuildUDIV(SDNode *N);
    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                               bool DemandHighBits = true);
    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
    SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
    SDValue ReduceLoadWidth(SDNode *N);
    SDValue ReduceLoadOpStoreWidth(SDNode *N);
    SDValue TransformFPLoadStorePair(SDNode *N);
    SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
    SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);

    SDValue GetDemandedBits(SDValue V, const APInt &Mask);

    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
                          SmallVector<SDValue, 8> &Aliases);

    /// isAlias - Return true if there is any possibility that the two addresses
    /// overlap.
    bool isAlias(SDValue Ptr1, int64_t Size1,
                 const Value *SrcValue1, int SrcValueOffset1,
                 unsigned SrcValueAlign1,
                 const MDNode *TBAAInfo1,
                 SDValue Ptr2, int64_t Size2,
                 const Value *SrcValue2, int SrcValueOffset2,
                 unsigned SrcValueAlign2,
                 const MDNode *TBAAInfo2) const;

    /// isAlias - Return true if there is any possibility that the two addresses
    /// overlap.
    bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1);

    /// FindAliasInfo - Extracts the relevant alias information from the memory
    /// node.  Returns true if the operand was a load.
    bool FindAliasInfo(SDNode *N,
                       SDValue &Ptr, int64_t &Size,
                       const Value *&SrcValue, int &SrcValueOffset,
                       unsigned &SrcValueAlignment,
                       const MDNode *&TBAAInfo) const;

    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
    /// looking for a better chain (aliasing node.)
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

    /// Merge consecutive store operations into a wide store.
    /// This optimization uses wide integers or vectors when possible.
    /// \return True if some memory operations were changed.
    bool MergeConsecutiveStores(StoreSDNode *N);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
      : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
        OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {}

    /// Run - runs the dag combiner on all nodes in the work list
    void Run(CombineLevel AtLevel);

    SelectionDAG &getDAG() const { return DAG; }

    /// getShiftAmountTy - Returns a type large enough to hold any valid
    /// shift amount - before type legalization these can be huge.
    EVT getShiftAmountTy(EVT LHSTy) {
      return LegalTypes ? TLI.getShiftAmountTy(LHSTy) : TLI.getPointerTy();
    }

    /// isTypeLegal - This method returns true if we are running before type
    /// legalization or if the specified VT is legal.
    bool isTypeLegal(const EVT &VT) {
      if (!LegalTypes) return true;
      return TLI.isTypeLegal(VT);
    }
  };
}


namespace {
/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
345198892Srdivackyclass WorkListRemover : public SelectionDAG::DAGUpdateListener { 346193323Sed DAGCombiner &DC; 347193323Sedpublic: 348239462Sdim explicit WorkListRemover(DAGCombiner &dc) 349239462Sdim : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {} 350193323Sed 351193323Sed virtual void NodeDeleted(SDNode *N, SDNode *E) { 352193323Sed DC.removeFromWorkList(N); 353193323Sed } 354193323Sed}; 355193323Sed} 356193323Sed 357193323Sed//===----------------------------------------------------------------------===// 358193323Sed// TargetLowering::DAGCombinerInfo implementation 359193323Sed//===----------------------------------------------------------------------===// 360193323Sed 361193323Sedvoid TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) { 362193323Sed ((DAGCombiner*)DC)->AddToWorkList(N); 363193323Sed} 364193323Sed 365221345Sdimvoid TargetLowering::DAGCombinerInfo::RemoveFromWorklist(SDNode *N) { 366221345Sdim ((DAGCombiner*)DC)->removeFromWorkList(N); 367221345Sdim} 368221345Sdim 369193323SedSDValue TargetLowering::DAGCombinerInfo:: 370193323SedCombineTo(SDNode *N, const std::vector<SDValue> &To, bool AddTo) { 371193323Sed return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo); 372193323Sed} 373193323Sed 374193323SedSDValue TargetLowering::DAGCombinerInfo:: 375193323SedCombineTo(SDNode *N, SDValue Res, bool AddTo) { 376193323Sed return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo); 377193323Sed} 378193323Sed 379193323Sed 380193323SedSDValue TargetLowering::DAGCombinerInfo:: 381193323SedCombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) { 382193323Sed return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo); 383193323Sed} 384193323Sed 385193323Sedvoid TargetLowering::DAGCombinerInfo:: 386193323SedCommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { 387193323Sed return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO); 388193323Sed} 389193323Sed 
390193323Sed//===----------------------------------------------------------------------===// 391193323Sed// Helper Functions 392193323Sed//===----------------------------------------------------------------------===// 393193323Sed 394193323Sed/// isNegatibleForFree - Return 1 if we can compute the negated form of the 395193323Sed/// specified expression for the same cost as the expression itself, or 2 if we 396193323Sed/// can compute the negated form more cheaply than the expression itself. 397193323Sedstatic char isNegatibleForFree(SDValue Op, bool LegalOperations, 398234353Sdim const TargetLowering &TLI, 399234353Sdim const TargetOptions *Options, 400193323Sed unsigned Depth = 0) { 401193323Sed // fneg is removable even if it has multiple uses. 402193323Sed if (Op.getOpcode() == ISD::FNEG) return 2; 403193323Sed 404193323Sed // Don't allow anything with multiple uses. 405193323Sed if (!Op.hasOneUse()) return 0; 406193323Sed 407193323Sed // Don't recurse exponentially. 408193323Sed if (Depth > 6) return 0; 409193323Sed 410193323Sed switch (Op.getOpcode()) { 411193323Sed default: return false; 412193323Sed case ISD::ConstantFP: 413193323Sed // Don't invert constant FP values after legalize. The negated constant 414193323Sed // isn't necessarily legal. 415193323Sed return LegalOperations ? 0 : 1; 416193323Sed case ISD::FADD: 417193323Sed // FIXME: determine better conditions for this xform. 418234353Sdim if (!Options->UnsafeFPMath) return 0; 419193323Sed 420234353Sdim // After operation legalization, it might not be legal to create new FSUBs. 
421234353Sdim if (LegalOperations && 422234353Sdim !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) 423234353Sdim return 0; 424234353Sdim 425243830Sdim // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 426234353Sdim if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, 427234353Sdim Options, Depth + 1)) 428193323Sed return V; 429193323Sed // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 430234353Sdim return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options, 431234353Sdim Depth + 1); 432193323Sed case ISD::FSUB: 433193323Sed // We can't turn -(A-B) into B-A when we honor signed zeros. 434234353Sdim if (!Options->UnsafeFPMath) return 0; 435193323Sed 436193323Sed // fold (fneg (fsub A, B)) -> (fsub B, A) 437193323Sed return 1; 438193323Sed 439193323Sed case ISD::FMUL: 440193323Sed case ISD::FDIV: 441234353Sdim if (Options->HonorSignDependentRoundingFPMath()) return 0; 442193323Sed 443193323Sed // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y)) 444234353Sdim if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, 445234353Sdim Options, Depth + 1)) 446193323Sed return V; 447193323Sed 448234353Sdim return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options, 449234353Sdim Depth + 1); 450193323Sed 451193323Sed case ISD::FP_EXTEND: 452193323Sed case ISD::FP_ROUND: 453193323Sed case ISD::FSIN: 454234353Sdim return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options, 455234353Sdim Depth + 1); 456193323Sed } 457193323Sed} 458193323Sed 459193323Sed/// GetNegatedExpression - If isNegatibleForFree returns true, this function 460193323Sed/// returns the newly negated expression. 461193323Sedstatic SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, 462193323Sed bool LegalOperations, unsigned Depth = 0) { 463193323Sed // fneg is removable even if it has multiple uses. 
464193323Sed if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0); 465193323Sed 466193323Sed // Don't allow anything with multiple uses. 467193323Sed assert(Op.hasOneUse() && "Unknown reuse!"); 468193323Sed 469193323Sed assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree"); 470193323Sed switch (Op.getOpcode()) { 471198090Srdivacky default: llvm_unreachable("Unknown code"); 472193323Sed case ISD::ConstantFP: { 473193323Sed APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 474193323Sed V.changeSign(); 475193323Sed return DAG.getConstantFP(V, Op.getValueType()); 476193323Sed } 477193323Sed case ISD::FADD: 478193323Sed // FIXME: determine better conditions for this xform. 479234353Sdim assert(DAG.getTarget().Options.UnsafeFPMath); 480193323Sed 481193323Sed // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 482234353Sdim if (isNegatibleForFree(Op.getOperand(0), LegalOperations, 483234353Sdim DAG.getTargetLoweringInfo(), 484234353Sdim &DAG.getTarget().Options, Depth+1)) 485193323Sed return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), 486193323Sed GetNegatedExpression(Op.getOperand(0), DAG, 487193323Sed LegalOperations, Depth+1), 488193323Sed Op.getOperand(1)); 489193323Sed // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 490193323Sed return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), 491193323Sed GetNegatedExpression(Op.getOperand(1), DAG, 492193323Sed LegalOperations, Depth+1), 493193323Sed Op.getOperand(0)); 494193323Sed case ISD::FSUB: 495193323Sed // We can't turn -(A-B) into B-A when we honor signed zeros. 
496234353Sdim assert(DAG.getTarget().Options.UnsafeFPMath); 497193323Sed 498193323Sed // fold (fneg (fsub 0, B)) -> B 499193323Sed if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0))) 500193323Sed if (N0CFP->getValueAPF().isZero()) 501193323Sed return Op.getOperand(1); 502193323Sed 503193323Sed // fold (fneg (fsub A, B)) -> (fsub B, A) 504193323Sed return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), 505193323Sed Op.getOperand(1), Op.getOperand(0)); 506193323Sed 507193323Sed case ISD::FMUL: 508193323Sed case ISD::FDIV: 509234353Sdim assert(!DAG.getTarget().Options.HonorSignDependentRoundingFPMath()); 510193323Sed 511193323Sed // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 512234353Sdim if (isNegatibleForFree(Op.getOperand(0), LegalOperations, 513234353Sdim DAG.getTargetLoweringInfo(), 514234353Sdim &DAG.getTarget().Options, Depth+1)) 515193323Sed return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), 516193323Sed GetNegatedExpression(Op.getOperand(0), DAG, 517193323Sed LegalOperations, Depth+1), 518193323Sed Op.getOperand(1)); 519193323Sed 520193323Sed // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 521193323Sed return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), 522193323Sed Op.getOperand(0), 523193323Sed GetNegatedExpression(Op.getOperand(1), DAG, 524193323Sed LegalOperations, Depth+1)); 525193323Sed 526193323Sed case ISD::FP_EXTEND: 527193323Sed case ISD::FSIN: 528193323Sed return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), 529193323Sed GetNegatedExpression(Op.getOperand(0), DAG, 530193323Sed LegalOperations, Depth+1)); 531193323Sed case ISD::FP_ROUND: 532193323Sed return DAG.getNode(ISD::FP_ROUND, Op.getDebugLoc(), Op.getValueType(), 533193323Sed GetNegatedExpression(Op.getOperand(0), DAG, 534193323Sed LegalOperations, Depth+1), 535193323Sed Op.getOperand(1)); 536193323Sed } 537193323Sed} 538193323Sed 539193323Sed 540193323Sed// isSetCCEquivalent - Return true if 
// this node is a setcc, or is a select_cc
// that selects between the values 1 and 0, making it equivalent to a setcc.
// Also, set the incoming LHS, RHS, and CC references to the appropriate
// nodes based on the type of node we are checking.  This simplifies life a
// bit for the callers.
static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                              SDValue &CC) {
  if (N.getOpcode() == ISD::SETCC) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(2);
    return true;
  }
  // (select_cc LHS, RHS, 1, 0, CC) behaves exactly like (setcc LHS, RHS, CC).
  if (N.getOpcode() == ISD::SELECT_CC &&
      N.getOperand(2).getOpcode() == ISD::Constant &&
      N.getOperand(3).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(N.getOperand(2))->getAPIntValue() == 1 &&
      cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) {
    LHS = N.getOperand(0);
    RHS = N.getOperand(1);
    CC  = N.getOperand(4);
    return true;
  }
  return false;
}

// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only
// one use.  If this is true, it allows the users to invert the operation for
// free when it is profitable to do so.
static bool isOneUseSetCC(SDValue N) {
  SDValue N0, N1, N2;
  if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse())
    return true;
  return false;
}

// ReassociateOps - Reassociate commutative/associative Opc expressions so that
// constants sink toward the root, enabling constant folding.  Returns the
// folded value, or a null SDValue if no reassociation applied.
SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL,
                                    SDValue N0, SDValue N1) {
  EVT VT = N0.getValueType();
  if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
    if (isa<ConstantSDNode>(N1)) {
      // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
      SDValue OpNode =
        DAG.FoldConstantArithmetic(Opc, VT,
                                   cast<ConstantSDNode>(N0.getOperand(1)),
                                   cast<ConstantSDNode>(N1));
      return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode);
    }
    if (N0.hasOneUse()) {
      // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
                                   N0.getOperand(0), N1);
      AddToWorkList(OpNode.getNode());
      return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1));
    }
  }

  if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) {
    if (isa<ConstantSDNode>(N0)) {
      // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2))
      SDValue OpNode =
        DAG.FoldConstantArithmetic(Opc, VT,
                                   cast<ConstantSDNode>(N1.getOperand(1)),
                                   cast<ConstantSDNode>(N0));
      return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode);
    }
    if (N1.hasOneUse()) {
      // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use
      // NOTE(review): this branch takes the debug location from N0 even though
      // the node being rewritten is N1 — presumably intentional symmetry with
      // the branch above, but worth confirming.
      SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT,
                                   N1.getOperand(0), N0);
      AddToWorkList(OpNode.getNode());
      return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1));
    }
  }

  return SDValue();
}

// CombineTo - Replace the NumTo values produced by N with the entries of To[],
// optionally pushing the replacements and their users back onto the worklist,
// and delete N if the replacement left it dead.  Returns SDValue(N, 0) as the
// conventional "N was handled" marker for the visit* routines.
SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                               bool AddTo) {
  assert(N->getNumValues() == NumTo && "Broken CombineTo call!");
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.1 ";
        N->dump(&DAG);
        dbgs() << "\nWith: ";
        To[0].getNode()->dump(&DAG);
        dbgs() << " and " << NumTo-1 << " other values\n";
        for (unsigned i = 0, e = NumTo; i != e; ++i)
          assert((!To[i].getNode() ||
                  N->getValueType(i) == To[i].getValueType()) &&
                 "Cannot combine value to value of different type!"));
  // Keep the worklist consistent while RAUW may delete nodes.
  WorkListRemover DeadNodes(*this);
  DAG.ReplaceAllUsesWith(N, To);
  if (AddTo) {
    // Push the new nodes and any users onto the worklist
    for (unsigned i = 0, e = NumTo; i != e; ++i) {
      if (To[i].getNode()) {
        AddToWorkList(To[i].getNode());
        AddUsersToWorkList(To[i].getNode());
      }
    }
  }

  // Finally, if the node is now dead, remove it from the graph.  The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (N->use_empty()) {
    // Nodes can be reintroduced into the worklist.  Make sure we do not
    // process a node that has been replaced.
    removeFromWorkList(N);

    // Finally, since the node is now dead, remove it from the graph.
    DAG.DeleteNode(N);
  }
  return SDValue(N, 0);
}

// CommitTargetLoweringOpt - Apply the Old->New replacement a target's
// SimplifyDemandedBits recorded in TLO, keeping the worklist in sync.
void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  // Replace all uses.  If any nodes become isomorphic to other nodes and
  // are deleted, make sure to remove them from our worklist.
  WorkListRemover DeadNodes(*this);
  DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);

  // Push the new node and any (possibly new) users onto the worklist.
  AddToWorkList(TLO.New.getNode());
  AddUsersToWorkList(TLO.New.getNode());

  // Finally, if the node is now dead, remove it from the graph.  The node
  // may not be dead if the replacement process recursively simplified to
  // something else needing this node.
  if (TLO.Old.getNode()->use_empty()) {
    removeFromWorkList(TLO.Old.getNode());

    // If the operands of this node are only used by the node, they will now
    // be dead.  Make sure to visit them first to delete dead nodes early.
    for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands(); i != e; ++i)
      if (TLO.Old.getNode()->getOperand(i).getNode()->hasOneUse())
        AddToWorkList(TLO.Old.getNode()->getOperand(i).getNode());

    DAG.DeleteNode(TLO.Old.getNode());
  }
}

/// SimplifyDemandedBits - Check the specified integer node value to see if
/// it can be simplified or if things it uses can be simplified by bit
/// propagation.  If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
  TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
  APInt KnownZero, KnownOne;
  // Let the target-independent simplifier try to shrink Op based on which
  // bits are actually demanded; it records any replacement in TLO.
  if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    return false;

  // Revisit the node.
  AddToWorkList(Op.getNode());

  // Replace the old value with the new one.
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.2 ";
        TLO.Old.getNode()->dump(&DAG);
        dbgs() << "\nWith: ";
        TLO.New.getNode()->dump(&DAG);
        dbgs() << '\n');

  CommitTargetLoweringOpt(TLO);
  return true;
}

/// ReplaceLoadWithPromotedLoad - Replace the uses of Load's value with a
/// truncate of ExtLoad's value, and Load's chain with ExtLoad's chain, then
/// delete the original load.  Used after a load has been promoted to a wider
/// extending load.
void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
  DebugLoc dl = Load->getDebugLoc();
  EVT VT = Load->getValueType(0);
  // Truncate the extended result back to the original value type so users
  // of the old load see an identically-typed value.
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0));

  DEBUG(dbgs() << "\nReplacing.9 ";
        Load->dump(&DAG);
        dbgs() << "\nWith: ";
        Trunc.getNode()->dump(&DAG);
        dbgs() << '\n');
  WorkListRemover DeadNodes(*this);
  // Value 0 is the loaded value; value 1 is the chain.
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1));
  removeFromWorkList(Load);
  DAG.DeleteNode(Load);
  AddToWorkList(Trunc.getNode());
}

/// PromoteOperand - Produce a PVT-typed version of Op for use as an operand
/// of a promoted operation.  Loads are rebuilt as extending loads (Replace is
/// set so the caller knows to retire the original load); AssertSext/
/// AssertZext/Constant get matching extensions; anything else is any-extended
/// if legal.  Returns a null SDValue on failure.
SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
  Replace = false;
  DebugLoc dl = Op.getDebugLoc();
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
    EVT MemVT = LD->getMemoryVT();
    // Non-extending loads become ZEXTLOAD if that's legal for MemVT,
    // otherwise EXTLOAD; already-extending loads keep their extension kind.
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
                                                  : ISD::EXTLOAD)
      : LD->getExtensionType();
    Replace = true;
    return DAG.getExtLoad(ExtType, dl, PVT,
                          LD->getChain(), LD->getBasePtr(),
                          LD->getPointerInfo(),
                          MemVT, LD->isVolatile(),
                          LD->isNonTemporal(), LD->getAlignment());
  }

  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::AssertSext:
    return DAG.getNode(ISD::AssertSext, dl, PVT,
                       SExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::AssertZext:
    return DAG.getNode(ISD::AssertZext, dl, PVT,
                       ZExtPromoteOperand(Op.getOperand(0), PVT),
                       Op.getOperand(1));
  case ISD::Constant: {
    unsigned ExtOpc =
      Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, PVT, Op);
  }
  }

  if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
    return SDValue();
  return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op);
}

/// SExtPromoteOperand - Like PromoteOperand, but sign-extend-in-reg the
/// promoted value from its original width so high bits are well defined.
SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
  if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
    return SDValue();
  EVT OldVT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (NewOp.getNode() == 0)
    return SDValue();
  AddToWorkList(NewOp.getNode());

  // If the operand was a load that got promoted, retire the old load now.
  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp,
                     DAG.getValueType(OldVT));
}

/// ZExtPromoteOperand - Like PromoteOperand, but zero the bits above the
/// original width of the promoted value.
SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
  EVT OldVT = Op.getValueType();
  DebugLoc dl = Op.getDebugLoc();
  bool Replace = false;
  SDValue NewOp = PromoteOperand(Op, PVT, Replace);
  if (NewOp.getNode() == 0)
    return SDValue();
  AddToWorkList(NewOp.getNode());

  // If the operand was a load that got promoted, retire the old load now.
  if (Replace)
    ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
  return DAG.getZeroExtendInReg(NewOp, dl, OldVT);
}

/// PromoteIntBinOp - Promote the specified integer binary operation if the
/// target
/// indicates it is beneficial.  e.g. On x86, it's usually better to
/// promote i16 operations to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace0 = false;
    SDValue N0 = Op.getOperand(0);
    SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
    if (NN0.getNode() == 0)
      return SDValue();

    bool Replace1 = false;
    SDValue N1 = Op.getOperand(1);
    SDValue NN1;
    // If both operands are the same node, reuse the single promoted value
    // rather than promoting it twice.
    if (N0 == N1)
      NN1 = NN0;
    else {
      NN1 = PromoteOperand(N1, PVT, Replace1);
      if (NN1.getNode() == 0)
        return SDValue();
    }

    AddToWorkList(NN0.getNode());
    if (NN1.getNode())
      AddToWorkList(NN1.getNode());

    // Retire any original loads that were rebuilt as extending loads.
    if (Replace0)
      ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
    if (Replace1)
      ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    DebugLoc dl = Op.getDebugLoc();
    // Perform the operation at the wider type, then truncate back.
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, PVT, NN0, NN1));
  }
  return SDValue();
}

/// PromoteIntShiftOp - Promote the specified integer shift operation if the
/// target indicates it is beneficial.  e.g. On x86, it's usually better to
/// promote i16 operations to i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    bool Replace = false;
    SDValue N0 = Op.getOperand(0);
    // The shifted value must be extended to match the shift's semantics:
    // sign-extend for SRA, zero-extend for SRL, anything for SHL.
    if (Opc == ISD::SRA)
      N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
    else if (Opc == ISD::SRL)
      N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
    else
      N0 = PromoteOperand(N0, PVT, Replace);
    if (N0.getNode() == 0)
      return SDValue();

    AddToWorkList(N0.getNode());
    if (Replace)
      ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());

    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    DebugLoc dl = Op.getDebugLoc();
    // Shift at the wider type (shift amount is unchanged), then truncate.
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1)));
  }
  return SDValue();
}

/// PromoteExtend - If the target prefers this extension at a wider type,
/// re-emit it directly at the result type, folding extend-of-extend.
SDValue DAGCombiner::PromoteExtend(SDValue Op) {
  if (!LegalOperations)
    return SDValue();

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return SDValue();

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return SDValue();

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");
    // fold (aext (aext x)) -> (aext x)
    // fold (aext (zext x)) -> (zext x)
    // fold (aext (sext x)) -> (sext x)
    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), VT, Op.getOperand(0));
  }
  return SDValue();
}

/// PromoteLoad - If the target prefers this load at a wider type, rebuild it
/// as an extending load of the promoted type, truncate the result back, and
/// splice the new nodes in place of the old load.  Returns true on success.
bool DAGCombiner::PromoteLoad(SDValue Op) {
  if (!LegalOperations)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return false;

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return false;

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    DebugLoc dl = Op.getDebugLoc();
    SDNode *N = Op.getNode();
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT MemVT = LD->getMemoryVT();
    // Same extension-kind selection as PromoteOperand: prefer ZEXTLOAD when
    // legal, fall back to EXTLOAD, keep an existing extension kind.
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
                                                  : ISD::EXTLOAD)
      : LD->getExtensionType();
    SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
                                   LD->getChain(), LD->getBasePtr(),
                                   LD->getPointerInfo(),
                                   MemVT, LD->isVolatile(),
                                   LD->isNonTemporal(), LD->getAlignment());
    SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);

    DEBUG(dbgs() << "\nPromoting ";
          N->dump(&DAG);
          dbgs() << "\nTo: ";
          Result.getNode()->dump(&DAG);
          dbgs() << '\n');
    WorkListRemover DeadNodes(*this);
    // Value 0 is the loaded value; value 1 is the chain.
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
    removeFromWorkList(N);
    DAG.DeleteNode(N);
    AddToWorkList(Result.getNode());
    return true;
  }
  return false;
}


//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//

void DAGCombiner::Run(CombineLevel AtLevel) {
  // set the instance variables, so that the various visit
// routines may use it.
  Level = AtLevel;
  LegalOperations = Level >= AfterLegalizeVectorOps;
  LegalTypes = Level >= AfterLegalizeTypes;

  // Add all the dag nodes to the worklist.
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I)
    AddToWorkList(I);

  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted, and tracking any
  // changes of the root.
  HandleSDNode Dummy(DAG.getRoot());

  // The root of the dag may dangle to deleted nodes until the dag combiner is
  // done.  Set it to null to avoid confusion.
  DAG.setRoot(SDValue());

  // while the worklist isn't empty, find a node and
  // try and combine it.
  while (!WorkListContents.empty()) {
    SDNode *N;
    // The WorkListOrder holds the SDNodes in order, but it may contain duplicates.
    // In order to avoid a linear scan, we use a set (O(log N)) to hold what the
    // worklist *should* contain, and check the node we want to visit is should
    // actually be visited.
    do {
      N = WorkListOrder.pop_back_val();
    } while (!WorkListContents.erase(N));

    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
    // N is deleted from the DAG, since they too may now be dead or may have a
    // reduced number of uses, allowing other xforms.
    if (N->use_empty() && N != &Dummy) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        AddToWorkList(N->getOperand(i).getNode());

      DAG.DeleteNode(N);
      continue;
    }

    SDValue RV = combine(N);

    // A null return means no simplification was found for N.
    if (RV.getNode() == 0)
      continue;

    ++NodesCombined;

    // If we get back the same node we passed in, rather than a new node or
    // zero, we know that the node must have defined multiple values and
    // CombineTo was used.  Since CombineTo takes care of the worklist
    // mechanics for us, we have no work to do in this case.
    if (RV.getNode() == N)
      continue;

    assert(N->getOpcode() != ISD::DELETED_NODE &&
           RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned new node!");

    DEBUG(dbgs() << "\nReplacing.3 ";
          N->dump(&DAG);
          dbgs() << "\nWith: ";
          RV.getNode()->dump(&DAG);
          dbgs() << '\n');

    // Transfer debug value.
    DAG.TransferDbgValues(SDValue(N, 0), RV);
    WorkListRemover DeadNodes(*this);
    if (N->getNumValues() == RV.getNode()->getNumValues())
      DAG.ReplaceAllUsesWith(N, RV.getNode());
    else {
      // Value-count mismatch is only allowed for the single-value case;
      // replace just that one value.
      assert(N->getValueType(0) == RV.getValueType() &&
             N->getNumValues() == 1 && "Type mismatch");
      SDValue OpV = RV;
      DAG.ReplaceAllUsesWith(N, &OpV);
    }

    // Push the new node and any users onto the worklist
    AddToWorkList(RV.getNode());
    AddUsersToWorkList(RV.getNode());

    // Add any uses of the old node to the worklist in case this node is the
    // last one that uses them.  They may become dead after this node is
    // deleted.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      AddToWorkList(N->getOperand(i).getNode());

    // Finally, if the node is now dead, remove it from the graph.  The node
    // may not be dead if the replacement process recursively simplified to
    // something else needing this node.
    if (N->use_empty()) {
      // Nodes can be reintroduced into the worklist.  Make sure we do not
      // process a node that has been replaced.
      removeFromWorkList(N);

      // Finally, since the node is now dead, remove it from the graph.
      DAG.DeleteNode(N);
    }
  }

  // If the root changed (e.g. it was a dead load, update the root).
  DAG.setRoot(Dummy.getValue());
  DAG.RemoveDeadNodes();
}

/// visit - Dispatch N to the per-opcode visit routine.  Returns the
/// replacement value, or a null SDValue if no combine applied.
SDValue DAGCombiner::visit(SDNode *N) {
  switch (N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::SUBC:               return visitSUBC(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::SUBE:               return visitSUBE(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:               return visitSREM(N);
  case ISD::UREM:               return visitUREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
  case ISD::SMULO:              return visitSMULO(N);
  case ISD::UMULO:              return visitUMULO(N);
  case ISD::SDIVREM:            return visitSDIVREM(N);
  case ISD::UDIVREM:            return visitUDIVREM(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTLZ_ZERO_UNDEF:    return visitCTLZ_ZERO_UNDEF(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTTZ_ZERO_UNDEF:    return visitCTTZ_ZERO_UNDEF(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case
ISD::SELECT:             return visitSELECT(N);
  case ISD::VSELECT:            return visitVSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BITCAST:            return visitBITCAST(N);
  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FMA:                return visitFMA(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::FFLOOR:             return visitFFLOOR(N);
  case ISD::FCEIL:              return visitFCEIL(N);
  case ISD::FTRUNC:             return visitFTRUNC(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
  case ISD::EXTRACT_SUBVECTOR:  return visitEXTRACT_SUBVECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  }
  return SDValue();
}

/// combine - Try, in order: the generic visit routines, target-specific DAG
/// combines, integer type promotion, and finally a commuted-operand CSE
/// lookup.  Returns the replacement value or null if nothing applied.
SDValue DAGCombiner::combine(SDNode *N) {
  SDValue RV = visit(N);

  // If nothing happened, try a target-specific DAG combine.
  if (RV.getNode() == 0) {
    assert(N->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned NULL!");

    // Target hooks only apply to target-specific opcodes or opcodes the
    // target explicitly registered interest in.
    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {

      // Expose the DAG combiner to the target combiner impls.
      TargetLowering::DAGCombinerInfo
        DagCombineInfo(DAG, Level, false, this);

      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }
  }

  // If nothing happened still, try promoting the operation.
  if (RV.getNode() == 0) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      RV = PromoteIntBinOp(SDValue(N, 0));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      RV = PromoteIntShiftOp(SDValue(N, 0));
      break;
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
      RV = PromoteExtend(SDValue(N, 0));
      break;
    case ISD::LOAD:
      // PromoteLoad splices replacements in itself; returning N signals
      // "combined in place" to the caller.
      if (PromoteLoad(SDValue(N, 0)))
        RV = SDValue(N, 0);
      break;
    }
  }

  // If N is a commutative binary node, try commuting it to enable more
  // sdisel CSE.
  if (RV.getNode() == 0 &&
      SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
      N->getNumValues() == 1) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);

    // Constant operands are canonicalized to RHS.
    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
      SDValue Ops[] = { N1, N0 };
      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
                                            Ops, 2);
      if (CSENode)
        return SDValue(CSENode, 0);
    }
  }

  return RV;
}

/// getInputChainForNode - Given a node, return its input chain if it has one,
/// otherwise return a null sd operand.
static SDValue getInputChainForNode(SDNode *N) {
  if (unsigned NumOps = N->getNumOperands()) {
    // Chains are conventionally first or last; check those before scanning
    // the middle operands.
    if (N->getOperand(0).getValueType() == MVT::Other)
      return N->getOperand(0);
    else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
      return N->getOperand(NumOps-1);
    for (unsigned i = 1; i < NumOps-1; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other)
        return N->getOperand(i);
  }
  return SDValue();
}

/// visitTokenFactor - Flatten trees of single-use TokenFactors into one node,
/// dropping entry tokens and duplicate operands along the way.
SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
  // If N has two operands, where one has an input chain equal to the other,
  // the 'other' chain is redundant.
  if (N->getNumOperands() == 2) {
    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
      return N->getOperand(0);
    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
      return N->getOperand(1);
  }

  SmallVector<SDNode *, 8> TFs;     // List of token factors to visit.
  SmallVector<SDValue, 8> Ops;      // Ops for replacing token factor.
  SmallPtrSet<SDNode*, 16> SeenOps;
  bool Changed = false;             // If we should replace this token factor.

  // Start out with this token factor.
  TFs.push_back(N);

  // Iterate through token factors.  The TFs grows when new token factors are
  // encountered.
  for (unsigned i = 0; i < TFs.size(); ++i) {
    SDNode *TF = TFs[i];

    // Check each of the operands.
    for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
      SDValue Op = TF->getOperand(i);

      switch (Op.getOpcode()) {
      case ISD::EntryToken:
        // Entry tokens don't need to be added to the list.  They are
        // redundant.
        Changed = true;
        break;

      case ISD::TokenFactor:
        // Only merge a nested TokenFactor if this is its sole use and it
        // hasn't already been queued.
        if (Op.hasOneUse() &&
            std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
          // Queue up for processing.
          TFs.push_back(Op.getNode());
          // Clean up in case the token factor is removed.
          AddToWorkList(Op.getNode());
          Changed = true;
          break;
        }
        // Fall thru

      default:
        // Only add if it isn't already in the list.
        if (SeenOps.insert(Op.getNode()))
          Ops.push_back(Op);
        else
          Changed = true;
        break;
      }
    }
  }

  SDValue Result;

  // If we've change things around then replace token factor.
  if (Changed) {
    if (Ops.empty()) {
      // The entry token is the only possible outcome.
      Result = DAG.getEntryNode();
    } else {
      // New and improved token factor.
      Result = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
                           MVT::Other, &Ops[0], Ops.size());
    }

    // Don't add users to work list.
    return CombineTo(N, Result, false);
  }

  // Result is null here; no replacement was found.
  return Result;
}

/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
  WorkListRemover DeadNodes(*this);
  // Replacing results may cause a different MERGE_VALUES to suddenly
  // be CSE'd with N, and carry its uses with it. Iterate until no
  // uses remain, to ensure that the node can be safely deleted.
  // First add the users of this node to the work list so that they
  // can be tried again once they have new operands.
  AddUsersToWorkList(N);
  do {
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
  } while (!N->use_empty());
  removeFromWorkList(N);
  DAG.DeleteNode(N);
  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
}

/// combineShlAddConstant - Helper for visitADD: distribute a shift over an
/// inner add-of-constant so the constants can fold.
static
SDValue combineShlAddConstant(DebugLoc DL, SDValue N0, SDValue N1,
                              SelectionDAG &DAG) {
  EVT VT = N0.getValueType();
  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);

  if (N01C && N00.getOpcode() == ISD::ADD && N00.getNode()->hasOneUse() &&
      isa<ConstantSDNode>(N00.getOperand(1))) {
    // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
    N0 = DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT,
                     DAG.getNode(ISD::SHL, N00.getDebugLoc(), VT,
                                 N00.getOperand(0), N01),
                     DAG.getNode(ISD::SHL, N01.getDebugLoc(), VT,
                                 N00.getOperand(1), N01));
    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
  }

  return SDValue();
}

SDValue
DAGCombiner::visitADD(SDNode *N) { 1375193323Sed SDValue N0 = N->getOperand(0); 1376193323Sed SDValue N1 = N->getOperand(1); 1377193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1378193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1379198090Srdivacky EVT VT = N0.getValueType(); 1380193323Sed 1381193323Sed // fold vector ops 1382193323Sed if (VT.isVector()) { 1383193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 1384193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 1385249423Sdim 1386249423Sdim // fold (add x, 0) -> x, vector edition 1387249423Sdim if (ISD::isBuildVectorAllZeros(N1.getNode())) 1388249423Sdim return N0; 1389249423Sdim if (ISD::isBuildVectorAllZeros(N0.getNode())) 1390249423Sdim return N1; 1391193323Sed } 1392193323Sed 1393193323Sed // fold (add x, undef) -> undef 1394193323Sed if (N0.getOpcode() == ISD::UNDEF) 1395193323Sed return N0; 1396193323Sed if (N1.getOpcode() == ISD::UNDEF) 1397193323Sed return N1; 1398193323Sed // fold (add c1, c2) -> c1+c2 1399193323Sed if (N0C && N1C) 1400193323Sed return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C); 1401193323Sed // canonicalize constant to RHS 1402193323Sed if (N0C && !N1C) 1403193323Sed return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0); 1404193323Sed // fold (add x, 0) -> x 1405193323Sed if (N1C && N1C->isNullValue()) 1406193323Sed return N0; 1407193323Sed // fold (add Sym, c) -> Sym+c 1408193323Sed if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) 1409193323Sed if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C && 1410193323Sed GA->getOpcode() == ISD::GlobalAddress) 1411210299Sed return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT, 1412193323Sed GA->getOffset() + 1413193323Sed (uint64_t)N1C->getSExtValue()); 1414193323Sed // fold ((c1-A)+c2) -> (c1+c2)-A 1415193323Sed if (N1C && N0.getOpcode() == ISD::SUB) 1416193323Sed if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) 1417193323Sed return 
DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1418193323Sed DAG.getConstant(N1C->getAPIntValue()+ 1419193323Sed N0C->getAPIntValue(), VT), 1420193323Sed N0.getOperand(1)); 1421193323Sed // reassociate add 1422193323Sed SDValue RADD = ReassociateOps(ISD::ADD, N->getDebugLoc(), N0, N1); 1423193323Sed if (RADD.getNode() != 0) 1424193323Sed return RADD; 1425193323Sed // fold ((0-A) + B) -> B-A 1426193323Sed if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) && 1427193323Sed cast<ConstantSDNode>(N0.getOperand(0))->isNullValue()) 1428193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1, N0.getOperand(1)); 1429193323Sed // fold (A + (0-B)) -> A-B 1430193323Sed if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) && 1431193323Sed cast<ConstantSDNode>(N1.getOperand(0))->isNullValue()) 1432193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1.getOperand(1)); 1433193323Sed // fold (A+(B-A)) -> B 1434193323Sed if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1)) 1435193323Sed return N1.getOperand(0); 1436193323Sed // fold ((B-A)+A) -> B 1437193323Sed if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1)) 1438193323Sed return N0.getOperand(0); 1439193323Sed // fold (A+(B-(A+C))) to (B-C) 1440193323Sed if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1441193323Sed N0 == N1.getOperand(1).getOperand(0)) 1442193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0), 1443193323Sed N1.getOperand(1).getOperand(1)); 1444193323Sed // fold (A+(B-(C+A))) to (B-C) 1445193323Sed if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1446193323Sed N0 == N1.getOperand(1).getOperand(1)) 1447193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0), 1448193323Sed N1.getOperand(1).getOperand(0)); 1449193323Sed // fold (A+((B-A)+or-C)) to (B+or-C) 1450193323Sed if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) && 
1451193323Sed N1.getOperand(0).getOpcode() == ISD::SUB && 1452193323Sed N0 == N1.getOperand(0).getOperand(1)) 1453193323Sed return DAG.getNode(N1.getOpcode(), N->getDebugLoc(), VT, 1454193323Sed N1.getOperand(0).getOperand(0), N1.getOperand(1)); 1455193323Sed 1456193323Sed // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant 1457193323Sed if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) { 1458193323Sed SDValue N00 = N0.getOperand(0); 1459193323Sed SDValue N01 = N0.getOperand(1); 1460193323Sed SDValue N10 = N1.getOperand(0); 1461193323Sed SDValue N11 = N1.getOperand(1); 1462193323Sed 1463193323Sed if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10)) 1464193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1465193323Sed DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, N00, N10), 1466193323Sed DAG.getNode(ISD::ADD, N1.getDebugLoc(), VT, N01, N11)); 1467193323Sed } 1468193323Sed 1469193323Sed if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) 1470193323Sed return SDValue(N, 0); 1471193323Sed 1472193323Sed // fold (a+b) -> (a|b) iff a and b share no bits. 1473193323Sed if (VT.isInteger() && !VT.isVector()) { 1474193323Sed APInt LHSZero, LHSOne; 1475193323Sed APInt RHSZero, RHSOne; 1476234353Sdim DAG.ComputeMaskedBits(N0, LHSZero, LHSOne); 1477193323Sed 1478193323Sed if (LHSZero.getBoolValue()) { 1479234353Sdim DAG.ComputeMaskedBits(N1, RHSZero, RHSOne); 1480193323Sed 1481193323Sed // If all possibly-set bits on the LHS are clear on the RHS, return an OR. 1482193323Sed // If all possibly-set bits on the RHS are clear on the LHS, return an OR. 
1483234353Sdim if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero) 1484193323Sed return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1); 1485193323Sed } 1486193323Sed } 1487193323Sed 1488193323Sed // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), ) 1489193323Sed if (N0.getOpcode() == ISD::SHL && N0.getNode()->hasOneUse()) { 1490193323Sed SDValue Result = combineShlAddConstant(N->getDebugLoc(), N0, N1, DAG); 1491193323Sed if (Result.getNode()) return Result; 1492193323Sed } 1493193323Sed if (N1.getOpcode() == ISD::SHL && N1.getNode()->hasOneUse()) { 1494193323Sed SDValue Result = combineShlAddConstant(N->getDebugLoc(), N1, N0, DAG); 1495193323Sed if (Result.getNode()) return Result; 1496193323Sed } 1497193323Sed 1498202878Srdivacky // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n)) 1499202878Srdivacky if (N1.getOpcode() == ISD::SHL && 1500202878Srdivacky N1.getOperand(0).getOpcode() == ISD::SUB) 1501202878Srdivacky if (ConstantSDNode *C = 1502202878Srdivacky dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(0))) 1503202878Srdivacky if (C->getAPIntValue() == 0) 1504202878Srdivacky return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, 1505202878Srdivacky DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, 1506202878Srdivacky N1.getOperand(0).getOperand(1), 1507202878Srdivacky N1.getOperand(1))); 1508202878Srdivacky if (N0.getOpcode() == ISD::SHL && 1509202878Srdivacky N0.getOperand(0).getOpcode() == ISD::SUB) 1510202878Srdivacky if (ConstantSDNode *C = 1511202878Srdivacky dyn_cast<ConstantSDNode>(N0.getOperand(0).getOperand(0))) 1512202878Srdivacky if (C->getAPIntValue() == 0) 1513202878Srdivacky return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1, 1514202878Srdivacky DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, 1515202878Srdivacky N0.getOperand(0).getOperand(1), 1516202878Srdivacky N0.getOperand(1))); 1517202878Srdivacky 1518218893Sdim if (N1.getOpcode() == ISD::AND) { 1519218893Sdim SDValue AndOp0 = N1.getOperand(0); 
1520218893Sdim ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1)); 1521218893Sdim unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0); 1522218893Sdim unsigned DestBits = VT.getScalarType().getSizeInBits(); 1523218893Sdim 1524218893Sdim // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x)) 1525218893Sdim // and similar xforms where the inner op is either ~0 or 0. 1526218893Sdim if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) { 1527218893Sdim DebugLoc DL = N->getDebugLoc(); 1528218893Sdim return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0); 1529218893Sdim } 1530218893Sdim } 1531218893Sdim 1532218893Sdim // add (sext i1), X -> sub X, (zext i1) 1533218893Sdim if (N0.getOpcode() == ISD::SIGN_EXTEND && 1534218893Sdim N0.getOperand(0).getValueType() == MVT::i1 && 1535218893Sdim !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) { 1536218893Sdim DebugLoc DL = N->getDebugLoc(); 1537218893Sdim SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); 1538218893Sdim return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt); 1539218893Sdim } 1540218893Sdim 1541193323Sed return SDValue(); 1542193323Sed} 1543193323Sed 1544193323SedSDValue DAGCombiner::visitADDC(SDNode *N) { 1545193323Sed SDValue N0 = N->getOperand(0); 1546193323Sed SDValue N1 = N->getOperand(1); 1547193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1548193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1549198090Srdivacky EVT VT = N0.getValueType(); 1550193323Sed 1551193323Sed // If the flag result is dead, turn this into an ADD. 1552234353Sdim if (!N->hasAnyUseOfValue(1)) 1553234353Sdim return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N1), 1554193323Sed DAG.getNode(ISD::CARRY_FALSE, 1555218893Sdim N->getDebugLoc(), MVT::Glue)); 1556193323Sed 1557193323Sed // canonicalize constant to RHS. 
1558193323Sed if (N0C && !N1C) 1559193323Sed return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0); 1560193323Sed 1561193323Sed // fold (addc x, 0) -> x + no carry out 1562193323Sed if (N1C && N1C->isNullValue()) 1563193323Sed return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, 1564218893Sdim N->getDebugLoc(), MVT::Glue)); 1565193323Sed 1566193323Sed // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits. 1567193323Sed APInt LHSZero, LHSOne; 1568193323Sed APInt RHSZero, RHSOne; 1569234353Sdim DAG.ComputeMaskedBits(N0, LHSZero, LHSOne); 1570193323Sed 1571193323Sed if (LHSZero.getBoolValue()) { 1572234353Sdim DAG.ComputeMaskedBits(N1, RHSZero, RHSOne); 1573193323Sed 1574193323Sed // If all possibly-set bits on the LHS are clear on the RHS, return an OR. 1575193323Sed // If all possibly-set bits on the RHS are clear on the LHS, return an OR. 1576234353Sdim if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero) 1577193323Sed return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1), 1578193323Sed DAG.getNode(ISD::CARRY_FALSE, 1579218893Sdim N->getDebugLoc(), MVT::Glue)); 1580193323Sed } 1581193323Sed 1582193323Sed return SDValue(); 1583193323Sed} 1584193323Sed 1585193323SedSDValue DAGCombiner::visitADDE(SDNode *N) { 1586193323Sed SDValue N0 = N->getOperand(0); 1587193323Sed SDValue N1 = N->getOperand(1); 1588193323Sed SDValue CarryIn = N->getOperand(2); 1589193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1590193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1591193323Sed 1592193323Sed // canonicalize constant to RHS 1593193323Sed if (N0C && !N1C) 1594193323Sed return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(), 1595193323Sed N1, N0, CarryIn); 1596193323Sed 1597193323Sed // fold (adde x, y, false) -> (addc x, y) 1598193323Sed if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 1599234353Sdim return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N0, N1); 
1600193323Sed 1601193323Sed return SDValue(); 1602193323Sed} 1603193323Sed 1604218893Sdim// Since it may not be valid to emit a fold to zero for vector initializers 1605218893Sdim// check if we can before folding. 1606218893Sdimstatic SDValue tryFoldToZero(DebugLoc DL, const TargetLowering &TLI, EVT VT, 1607219077Sdim SelectionDAG &DAG, bool LegalOperations) { 1608218893Sdim if (!VT.isVector()) { 1609218893Sdim return DAG.getConstant(0, VT); 1610223017Sdim } 1611223017Sdim if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) { 1612218893Sdim // Produce a vector of zeros. 1613218893Sdim SDValue El = DAG.getConstant(0, VT.getVectorElementType()); 1614218893Sdim std::vector<SDValue> Ops(VT.getVectorNumElements(), El); 1615218893Sdim return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, 1616218893Sdim &Ops[0], Ops.size()); 1617218893Sdim } 1618218893Sdim return SDValue(); 1619218893Sdim} 1620218893Sdim 1621193323SedSDValue DAGCombiner::visitSUB(SDNode *N) { 1622193323Sed SDValue N0 = N->getOperand(0); 1623193323Sed SDValue N1 = N->getOperand(1); 1624193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode()); 1625193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 1626224145Sdim ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? 0 : 1627224145Sdim dyn_cast<ConstantSDNode>(N1.getOperand(1).getNode()); 1628198090Srdivacky EVT VT = N0.getValueType(); 1629193323Sed 1630193323Sed // fold vector ops 1631193323Sed if (VT.isVector()) { 1632193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 1633193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 1634249423Sdim 1635249423Sdim // fold (sub x, 0) -> x, vector edition 1636249423Sdim if (ISD::isBuildVectorAllZeros(N1.getNode())) 1637249423Sdim return N0; 1638193323Sed } 1639193323Sed 1640193323Sed // fold (sub x, x) -> 0 1641218893Sdim // FIXME: Refactor this and xor and other similar operations together. 
1642193323Sed if (N0 == N1) 1643218893Sdim return tryFoldToZero(N->getDebugLoc(), TLI, VT, DAG, LegalOperations); 1644193323Sed // fold (sub c1, c2) -> c1-c2 1645193323Sed if (N0C && N1C) 1646193323Sed return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C); 1647193323Sed // fold (sub x, c) -> (add x, -c) 1648193323Sed if (N1C) 1649193323Sed return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, 1650193323Sed DAG.getConstant(-N1C->getAPIntValue(), VT)); 1651202878Srdivacky // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) 1652202878Srdivacky if (N0C && N0C->isAllOnesValue()) 1653202878Srdivacky return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0); 1654218893Sdim // fold A-(A-B) -> B 1655218893Sdim if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0)) 1656218893Sdim return N1.getOperand(1); 1657193323Sed // fold (A+B)-A -> B 1658193323Sed if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1) 1659193323Sed return N0.getOperand(1); 1660193323Sed // fold (A+B)-B -> A 1661193323Sed if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1) 1662193323Sed return N0.getOperand(0); 1663224145Sdim // fold C2-(A+C1) -> (C2-C1)-A 1664224145Sdim if (N1.getOpcode() == ISD::ADD && N0C && N1C1) { 1665243830Sdim SDValue NewC = DAG.getConstant(N0C->getAPIntValue() - N1C1->getAPIntValue(), 1666243830Sdim VT); 1667224145Sdim return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, NewC, 1668239462Sdim N1.getOperand(0)); 1669224145Sdim } 1670193323Sed // fold ((A+(B+or-C))-B) -> A+or-C 1671193323Sed if (N0.getOpcode() == ISD::ADD && 1672193323Sed (N0.getOperand(1).getOpcode() == ISD::SUB || 1673193323Sed N0.getOperand(1).getOpcode() == ISD::ADD) && 1674193323Sed N0.getOperand(1).getOperand(0) == N1) 1675193323Sed return DAG.getNode(N0.getOperand(1).getOpcode(), N->getDebugLoc(), VT, 1676193323Sed N0.getOperand(0), N0.getOperand(1).getOperand(1)); 1677193323Sed // fold ((A+(C+B))-B) -> A+C 1678193323Sed if (N0.getOpcode() == ISD::ADD && 1679193323Sed 
N0.getOperand(1).getOpcode() == ISD::ADD && 1680193323Sed N0.getOperand(1).getOperand(1) == N1) 1681193323Sed return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, 1682193323Sed N0.getOperand(0), N0.getOperand(1).getOperand(0)); 1683193323Sed // fold ((A-(B-C))-C) -> A-B 1684193323Sed if (N0.getOpcode() == ISD::SUB && 1685193323Sed N0.getOperand(1).getOpcode() == ISD::SUB && 1686193323Sed N0.getOperand(1).getOperand(1) == N1) 1687193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1688193323Sed N0.getOperand(0), N0.getOperand(1).getOperand(0)); 1689193323Sed 1690193323Sed // If either operand of a sub is undef, the result is undef 1691193323Sed if (N0.getOpcode() == ISD::UNDEF) 1692193323Sed return N0; 1693193323Sed if (N1.getOpcode() == ISD::UNDEF) 1694193323Sed return N1; 1695193323Sed 1696193323Sed // If the relocation model supports it, consider symbol offsets. 1697193323Sed if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) 1698193323Sed if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { 1699193323Sed // fold (sub Sym, c) -> Sym-c 1700193323Sed if (N1C && GA->getOpcode() == ISD::GlobalAddress) 1701210299Sed return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT, 1702193323Sed GA->getOffset() - 1703193323Sed (uint64_t)N1C->getSExtValue()); 1704193323Sed // fold (sub Sym+c1, Sym+c2) -> c1-c2 1705193323Sed if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) 1706193323Sed if (GA->getGlobal() == GB->getGlobal()) 1707193323Sed return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), 1708193323Sed VT); 1709193323Sed } 1710193323Sed 1711193323Sed return SDValue(); 1712193323Sed} 1713193323Sed 1714234353SdimSDValue DAGCombiner::visitSUBC(SDNode *N) { 1715234353Sdim SDValue N0 = N->getOperand(0); 1716234353Sdim SDValue N1 = N->getOperand(1); 1717234353Sdim ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1718234353Sdim ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1719234353Sdim EVT VT = 
N0.getValueType(); 1720234353Sdim 1721234353Sdim // If the flag result is dead, turn this into an SUB. 1722234353Sdim if (!N->hasAnyUseOfValue(1)) 1723234353Sdim return CombineTo(N, DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1), 1724234353Sdim DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1725234353Sdim MVT::Glue)); 1726234353Sdim 1727234353Sdim // fold (subc x, x) -> 0 + no borrow 1728234353Sdim if (N0 == N1) 1729234353Sdim return CombineTo(N, DAG.getConstant(0, VT), 1730234353Sdim DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1731234353Sdim MVT::Glue)); 1732234353Sdim 1733234353Sdim // fold (subc x, 0) -> x + no borrow 1734234353Sdim if (N1C && N1C->isNullValue()) 1735234353Sdim return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1736234353Sdim MVT::Glue)); 1737234353Sdim 1738234353Sdim // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow 1739234353Sdim if (N0C && N0C->isAllOnesValue()) 1740234353Sdim return CombineTo(N, DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0), 1741234353Sdim DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1742234353Sdim MVT::Glue)); 1743234353Sdim 1744234353Sdim return SDValue(); 1745234353Sdim} 1746234353Sdim 1747234353SdimSDValue DAGCombiner::visitSUBE(SDNode *N) { 1748234353Sdim SDValue N0 = N->getOperand(0); 1749234353Sdim SDValue N1 = N->getOperand(1); 1750234353Sdim SDValue CarryIn = N->getOperand(2); 1751234353Sdim 1752234353Sdim // fold (sube x, y, false) -> (subc x, y) 1753234353Sdim if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 1754234353Sdim return DAG.getNode(ISD::SUBC, N->getDebugLoc(), N->getVTList(), N0, N1); 1755234353Sdim 1756234353Sdim return SDValue(); 1757234353Sdim} 1758234353Sdim 1759193323SedSDValue DAGCombiner::visitMUL(SDNode *N) { 1760193323Sed SDValue N0 = N->getOperand(0); 1761193323Sed SDValue N1 = N->getOperand(1); 1762193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1763193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1764198090Srdivacky 
EVT VT = N0.getValueType(); 1765193323Sed 1766193323Sed // fold vector ops 1767193323Sed if (VT.isVector()) { 1768193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 1769193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 1770193323Sed } 1771193323Sed 1772193323Sed // fold (mul x, undef) -> 0 1773193323Sed if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 1774193323Sed return DAG.getConstant(0, VT); 1775193323Sed // fold (mul c1, c2) -> c1*c2 1776193323Sed if (N0C && N1C) 1777193323Sed return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0C, N1C); 1778193323Sed // canonicalize constant to RHS 1779193323Sed if (N0C && !N1C) 1780193323Sed return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, N1, N0); 1781193323Sed // fold (mul x, 0) -> 0 1782193323Sed if (N1C && N1C->isNullValue()) 1783193323Sed return N1; 1784193323Sed // fold (mul x, -1) -> 0-x 1785193323Sed if (N1C && N1C->isAllOnesValue()) 1786193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1787193323Sed DAG.getConstant(0, VT), N0); 1788193323Sed // fold (mul x, (1 << c)) -> x << c 1789193323Sed if (N1C && N1C->getAPIntValue().isPowerOf2()) 1790193323Sed return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0, 1791193323Sed DAG.getConstant(N1C->getAPIntValue().logBase2(), 1792219077Sdim getShiftAmountTy(N0.getValueType()))); 1793193323Sed // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c 1794193323Sed if (N1C && (-N1C->getAPIntValue()).isPowerOf2()) { 1795193323Sed unsigned Log2Val = (-N1C->getAPIntValue()).logBase2(); 1796193323Sed // FIXME: If the input is something that is easily negated (e.g. a 1797193323Sed // single-use add), we should put the negate there. 
1798193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1799193323Sed DAG.getConstant(0, VT), 1800193323Sed DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0, 1801219077Sdim DAG.getConstant(Log2Val, 1802219077Sdim getShiftAmountTy(N0.getValueType())))); 1803193323Sed } 1804193323Sed // (mul (shl X, c1), c2) -> (mul X, c2 << c1) 1805193323Sed if (N1C && N0.getOpcode() == ISD::SHL && 1806193323Sed isa<ConstantSDNode>(N0.getOperand(1))) { 1807193323Sed SDValue C3 = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, 1808193323Sed N1, N0.getOperand(1)); 1809193323Sed AddToWorkList(C3.getNode()); 1810193323Sed return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, 1811193323Sed N0.getOperand(0), C3); 1812193323Sed } 1813193323Sed 1814193323Sed // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one 1815193323Sed // use. 1816193323Sed { 1817193323Sed SDValue Sh(0,0), Y(0,0); 1818193323Sed // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). 1819193323Sed if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) && 1820193323Sed N0.getNode()->hasOneUse()) { 1821193323Sed Sh = N0; Y = N1; 1822193323Sed } else if (N1.getOpcode() == ISD::SHL && 1823193323Sed isa<ConstantSDNode>(N1.getOperand(1)) && 1824193323Sed N1.getNode()->hasOneUse()) { 1825193323Sed Sh = N1; Y = N0; 1826193323Sed } 1827193323Sed 1828193323Sed if (Sh.getNode()) { 1829193323Sed SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, 1830193323Sed Sh.getOperand(0), Y); 1831193323Sed return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, 1832193323Sed Mul, Sh.getOperand(1)); 1833193323Sed } 1834193323Sed } 1835193323Sed 1836193323Sed // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2) 1837193323Sed if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() && 1838193323Sed isa<ConstantSDNode>(N0.getOperand(1))) 1839193323Sed return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, 1840193323Sed DAG.getNode(ISD::MUL, N0.getDebugLoc(), VT, 1841193323Sed 
N0.getOperand(0), N1), 1842193323Sed DAG.getNode(ISD::MUL, N1.getDebugLoc(), VT, 1843193323Sed N0.getOperand(1), N1)); 1844193323Sed 1845193323Sed // reassociate mul 1846193323Sed SDValue RMUL = ReassociateOps(ISD::MUL, N->getDebugLoc(), N0, N1); 1847193323Sed if (RMUL.getNode() != 0) 1848193323Sed return RMUL; 1849193323Sed 1850193323Sed return SDValue(); 1851193323Sed} 1852193323Sed 1853193323SedSDValue DAGCombiner::visitSDIV(SDNode *N) { 1854193323Sed SDValue N0 = N->getOperand(0); 1855193323Sed SDValue N1 = N->getOperand(1); 1856193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode()); 1857193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 1858198090Srdivacky EVT VT = N->getValueType(0); 1859193323Sed 1860193323Sed // fold vector ops 1861193323Sed if (VT.isVector()) { 1862193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 1863193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 1864193323Sed } 1865193323Sed 1866193323Sed // fold (sdiv c1, c2) -> c1/c2 1867193323Sed if (N0C && N1C && !N1C->isNullValue()) 1868193323Sed return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C); 1869193323Sed // fold (sdiv X, 1) -> X 1870234353Sdim if (N1C && N1C->getAPIntValue() == 1LL) 1871193323Sed return N0; 1872193323Sed // fold (sdiv X, -1) -> 0-X 1873193323Sed if (N1C && N1C->isAllOnesValue()) 1874193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1875193323Sed DAG.getConstant(0, VT), N0); 1876193323Sed // If we know the sign bits of both operands are zero, strength reduce to a 1877193323Sed // udiv instead. 
Handles (X&15) /s 4 -> X&15 >> 2 1878193323Sed if (!VT.isVector()) { 1879193323Sed if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 1880193323Sed return DAG.getNode(ISD::UDIV, N->getDebugLoc(), N1.getValueType(), 1881193323Sed N0, N1); 1882193323Sed } 1883193323Sed // fold (sdiv X, pow2) -> simple ops after legalize 1884234353Sdim if (N1C && !N1C->isNullValue() && 1885234353Sdim (N1C->getAPIntValue().isPowerOf2() || 1886234353Sdim (-N1C->getAPIntValue()).isPowerOf2())) { 1887193323Sed // If dividing by powers of two is cheap, then don't perform the following 1888193323Sed // fold. 1889193323Sed if (TLI.isPow2DivCheap()) 1890193323Sed return SDValue(); 1891193323Sed 1892234353Sdim unsigned lg2 = N1C->getAPIntValue().countTrailingZeros(); 1893193323Sed 1894193323Sed // Splat the sign bit into the register 1895193323Sed SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0, 1896193323Sed DAG.getConstant(VT.getSizeInBits()-1, 1897219077Sdim getShiftAmountTy(N0.getValueType()))); 1898193323Sed AddToWorkList(SGN.getNode()); 1899193323Sed 1900193323Sed // Add (N0 < 0) ? abs2 - 1 : 0; 1901193323Sed SDValue SRL = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, SGN, 1902193323Sed DAG.getConstant(VT.getSizeInBits() - lg2, 1903219077Sdim getShiftAmountTy(SGN.getValueType()))); 1904193323Sed SDValue ADD = DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, SRL); 1905193323Sed AddToWorkList(SRL.getNode()); 1906193323Sed AddToWorkList(ADD.getNode()); // Divide by pow2 1907193323Sed SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, ADD, 1908219077Sdim DAG.getConstant(lg2, getShiftAmountTy(ADD.getValueType()))); 1909193323Sed 1910193323Sed // If we're dividing by a positive value, we're done. Otherwise, we must 1911193323Sed // negate the result. 
1912234353Sdim if (N1C->getAPIntValue().isNonNegative()) 1913193323Sed return SRA; 1914193323Sed 1915193323Sed AddToWorkList(SRA.getNode()); 1916193323Sed return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, 1917193323Sed DAG.getConstant(0, VT), SRA); 1918193323Sed } 1919193323Sed 1920193323Sed // if integer divide is expensive and we satisfy the requirements, emit an 1921193323Sed // alternate sequence. 1922234353Sdim if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) { 1923193323Sed SDValue Op = BuildSDIV(N); 1924193323Sed if (Op.getNode()) return Op; 1925193323Sed } 1926193323Sed 1927193323Sed // undef / X -> 0 1928193323Sed if (N0.getOpcode() == ISD::UNDEF) 1929193323Sed return DAG.getConstant(0, VT); 1930193323Sed // X / undef -> undef 1931193323Sed if (N1.getOpcode() == ISD::UNDEF) 1932193323Sed return N1; 1933193323Sed 1934193323Sed return SDValue(); 1935193323Sed} 1936193323Sed 1937193323SedSDValue DAGCombiner::visitUDIV(SDNode *N) { 1938193323Sed SDValue N0 = N->getOperand(0); 1939193323Sed SDValue N1 = N->getOperand(1); 1940193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode()); 1941193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 1942198090Srdivacky EVT VT = N->getValueType(0); 1943193323Sed 1944193323Sed // fold vector ops 1945193323Sed if (VT.isVector()) { 1946193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 1947193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 1948193323Sed } 1949193323Sed 1950193323Sed // fold (udiv c1, c2) -> c1/c2 1951193323Sed if (N0C && N1C && !N1C->isNullValue()) 1952193323Sed return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C); 1953193323Sed // fold (udiv x, (1 << c)) -> x >>u c 1954193323Sed if (N1C && N1C->getAPIntValue().isPowerOf2()) 1955193323Sed return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, 1956193323Sed DAG.getConstant(N1C->getAPIntValue().logBase2(), 1957219077Sdim getShiftAmountTy(N0.getValueType()))); 1958193323Sed // fold (udiv x, (shl c, y)) -> x >>u 
(log2(c)+y) iff c is power of 2 1959193323Sed if (N1.getOpcode() == ISD::SHL) { 1960193323Sed if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { 1961193323Sed if (SHC->getAPIntValue().isPowerOf2()) { 1962198090Srdivacky EVT ADDVT = N1.getOperand(1).getValueType(); 1963193323Sed SDValue Add = DAG.getNode(ISD::ADD, N->getDebugLoc(), ADDVT, 1964193323Sed N1.getOperand(1), 1965193323Sed DAG.getConstant(SHC->getAPIntValue() 1966193323Sed .logBase2(), 1967193323Sed ADDVT)); 1968193323Sed AddToWorkList(Add.getNode()); 1969193323Sed return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, Add); 1970193323Sed } 1971193323Sed } 1972193323Sed } 1973193323Sed // fold (udiv x, c) -> alternate 1974193323Sed if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) { 1975193323Sed SDValue Op = BuildUDIV(N); 1976193323Sed if (Op.getNode()) return Op; 1977193323Sed } 1978193323Sed 1979193323Sed // undef / X -> 0 1980193323Sed if (N0.getOpcode() == ISD::UNDEF) 1981193323Sed return DAG.getConstant(0, VT); 1982193323Sed // X / undef -> undef 1983193323Sed if (N1.getOpcode() == ISD::UNDEF) 1984193323Sed return N1; 1985193323Sed 1986193323Sed return SDValue(); 1987193323Sed} 1988193323Sed 1989193323SedSDValue DAGCombiner::visitSREM(SDNode *N) { 1990193323Sed SDValue N0 = N->getOperand(0); 1991193323Sed SDValue N1 = N->getOperand(1); 1992193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1993193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1994198090Srdivacky EVT VT = N->getValueType(0); 1995193323Sed 1996193323Sed // fold (srem c1, c2) -> c1%c2 1997193323Sed if (N0C && N1C && !N1C->isNullValue()) 1998193323Sed return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C); 1999193323Sed // If we know the sign bits of both operands are zero, strength reduce to a 2000193323Sed // urem instead. 
Handles (X & 0x0FFFFFFF) %s 16 -> X&15 2001193323Sed if (!VT.isVector()) { 2002193323Sed if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2003193323Sed return DAG.getNode(ISD::UREM, N->getDebugLoc(), VT, N0, N1); 2004193323Sed } 2005193323Sed 2006193323Sed // If X/C can be simplified by the division-by-constant logic, lower 2007193323Sed // X%C to the equivalent of X-X/C*C. 2008193323Sed if (N1C && !N1C->isNullValue()) { 2009193323Sed SDValue Div = DAG.getNode(ISD::SDIV, N->getDebugLoc(), VT, N0, N1); 2010193323Sed AddToWorkList(Div.getNode()); 2011193323Sed SDValue OptimizedDiv = combine(Div.getNode()); 2012193323Sed if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2013193323Sed SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, 2014193323Sed OptimizedDiv, N1); 2015193323Sed SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul); 2016193323Sed AddToWorkList(Mul.getNode()); 2017193323Sed return Sub; 2018193323Sed } 2019193323Sed } 2020193323Sed 2021193323Sed // undef % X -> 0 2022193323Sed if (N0.getOpcode() == ISD::UNDEF) 2023193323Sed return DAG.getConstant(0, VT); 2024193323Sed // X % undef -> undef 2025193323Sed if (N1.getOpcode() == ISD::UNDEF) 2026193323Sed return N1; 2027193323Sed 2028193323Sed return SDValue(); 2029193323Sed} 2030193323Sed 2031193323SedSDValue DAGCombiner::visitUREM(SDNode *N) { 2032193323Sed SDValue N0 = N->getOperand(0); 2033193323Sed SDValue N1 = N->getOperand(1); 2034193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 2035193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2036198090Srdivacky EVT VT = N->getValueType(0); 2037193323Sed 2038193323Sed // fold (urem c1, c2) -> c1%c2 2039193323Sed if (N0C && N1C && !N1C->isNullValue()) 2040193323Sed return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C); 2041193323Sed // fold (urem x, pow2) -> (and x, pow2-1) 2042193323Sed if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2()) 2043193323Sed return 
DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, 2044193323Sed DAG.getConstant(N1C->getAPIntValue()-1,VT)); 2045193323Sed // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) 2046193323Sed if (N1.getOpcode() == ISD::SHL) { 2047193323Sed if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { 2048193323Sed if (SHC->getAPIntValue().isPowerOf2()) { 2049193323Sed SDValue Add = 2050193323Sed DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, 2051193323Sed DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), 2052193323Sed VT)); 2053193323Sed AddToWorkList(Add.getNode()); 2054193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, Add); 2055193323Sed } 2056193323Sed } 2057193323Sed } 2058193323Sed 2059193323Sed // If X/C can be simplified by the division-by-constant logic, lower 2060193323Sed // X%C to the equivalent of X-X/C*C. 2061193323Sed if (N1C && !N1C->isNullValue()) { 2062193323Sed SDValue Div = DAG.getNode(ISD::UDIV, N->getDebugLoc(), VT, N0, N1); 2063193323Sed AddToWorkList(Div.getNode()); 2064193323Sed SDValue OptimizedDiv = combine(Div.getNode()); 2065193323Sed if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2066193323Sed SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, 2067193323Sed OptimizedDiv, N1); 2068193323Sed SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul); 2069193323Sed AddToWorkList(Mul.getNode()); 2070193323Sed return Sub; 2071193323Sed } 2072193323Sed } 2073193323Sed 2074193323Sed // undef % X -> 0 2075193323Sed if (N0.getOpcode() == ISD::UNDEF) 2076193323Sed return DAG.getConstant(0, VT); 2077193323Sed // X % undef -> undef 2078193323Sed if (N1.getOpcode() == ISD::UNDEF) 2079193323Sed return N1; 2080193323Sed 2081193323Sed return SDValue(); 2082193323Sed} 2083193323Sed 2084193323SedSDValue DAGCombiner::visitMULHS(SDNode *N) { 2085193323Sed SDValue N0 = N->getOperand(0); 2086193323Sed SDValue N1 = N->getOperand(1); 2087193323Sed ConstantSDNode *N1C = 
dyn_cast<ConstantSDNode>(N1); 2088198090Srdivacky EVT VT = N->getValueType(0); 2089218893Sdim DebugLoc DL = N->getDebugLoc(); 2090193323Sed 2091193323Sed // fold (mulhs x, 0) -> 0 2092193323Sed if (N1C && N1C->isNullValue()) 2093193323Sed return N1; 2094193323Sed // fold (mulhs x, 1) -> (sra x, size(x)-1) 2095193323Sed if (N1C && N1C->getAPIntValue() == 1) 2096193323Sed return DAG.getNode(ISD::SRA, N->getDebugLoc(), N0.getValueType(), N0, 2097193323Sed DAG.getConstant(N0.getValueType().getSizeInBits() - 1, 2098219077Sdim getShiftAmountTy(N0.getValueType()))); 2099193323Sed // fold (mulhs x, undef) -> 0 2100193323Sed if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 2101193323Sed return DAG.getConstant(0, VT); 2102193323Sed 2103218893Sdim // If the type twice as wide is legal, transform the mulhs to a wider multiply 2104218893Sdim // plus a shift. 2105218893Sdim if (VT.isSimple() && !VT.isVector()) { 2106218893Sdim MVT Simple = VT.getSimpleVT(); 2107218893Sdim unsigned SimpleSize = Simple.getSizeInBits(); 2108218893Sdim EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2109218893Sdim if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2110218893Sdim N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); 2111218893Sdim N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); 2112218893Sdim N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2113218893Sdim N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2114219077Sdim DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType()))); 2115218893Sdim return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2116218893Sdim } 2117218893Sdim } 2118219077Sdim 2119193323Sed return SDValue(); 2120193323Sed} 2121193323Sed 2122193323SedSDValue DAGCombiner::visitMULHU(SDNode *N) { 2123193323Sed SDValue N0 = N->getOperand(0); 2124193323Sed SDValue N1 = N->getOperand(1); 2125193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2126198090Srdivacky EVT VT = N->getValueType(0); 2127218893Sdim DebugLoc DL = N->getDebugLoc(); 
2128193323Sed 2129193323Sed // fold (mulhu x, 0) -> 0 2130193323Sed if (N1C && N1C->isNullValue()) 2131193323Sed return N1; 2132193323Sed // fold (mulhu x, 1) -> 0 2133193323Sed if (N1C && N1C->getAPIntValue() == 1) 2134193323Sed return DAG.getConstant(0, N0.getValueType()); 2135193323Sed // fold (mulhu x, undef) -> 0 2136193323Sed if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 2137193323Sed return DAG.getConstant(0, VT); 2138193323Sed 2139218893Sdim // If the type twice as wide is legal, transform the mulhu to a wider multiply 2140218893Sdim // plus a shift. 2141218893Sdim if (VT.isSimple() && !VT.isVector()) { 2142218893Sdim MVT Simple = VT.getSimpleVT(); 2143218893Sdim unsigned SimpleSize = Simple.getSizeInBits(); 2144218893Sdim EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2145218893Sdim if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2146218893Sdim N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0); 2147218893Sdim N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1); 2148218893Sdim N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2149218893Sdim N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2150219077Sdim DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType()))); 2151218893Sdim return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2152218893Sdim } 2153218893Sdim } 2154219077Sdim 2155193323Sed return SDValue(); 2156193323Sed} 2157193323Sed 2158193323Sed/// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that 2159193323Sed/// compute two values. LoOp and HiOp give the opcodes for the two computations 2160193323Sed/// that are being performed. Return true if a simplification was made. 2161193323Sed/// 2162193323SedSDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, 2163193323Sed unsigned HiOp) { 2164193323Sed // If the high half is not needed, just compute the low half. 
2165193323Sed bool HiExists = N->hasAnyUseOfValue(1); 2166193323Sed if (!HiExists && 2167193323Sed (!LegalOperations || 2168193323Sed TLI.isOperationLegal(LoOp, N->getValueType(0)))) { 2169193323Sed SDValue Res = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0), 2170193323Sed N->op_begin(), N->getNumOperands()); 2171193323Sed return CombineTo(N, Res, Res); 2172193323Sed } 2173193323Sed 2174193323Sed // If the low half is not needed, just compute the high half. 2175193323Sed bool LoExists = N->hasAnyUseOfValue(0); 2176193323Sed if (!LoExists && 2177193323Sed (!LegalOperations || 2178193323Sed TLI.isOperationLegal(HiOp, N->getValueType(1)))) { 2179193323Sed SDValue Res = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1), 2180193323Sed N->op_begin(), N->getNumOperands()); 2181193323Sed return CombineTo(N, Res, Res); 2182193323Sed } 2183193323Sed 2184193323Sed // If both halves are used, return as it is. 2185193323Sed if (LoExists && HiExists) 2186193323Sed return SDValue(); 2187193323Sed 2188193323Sed // If the two computed results can be simplified separately, separate them. 
2189193323Sed if (LoExists) { 2190193323Sed SDValue Lo = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0), 2191193323Sed N->op_begin(), N->getNumOperands()); 2192193323Sed AddToWorkList(Lo.getNode()); 2193193323Sed SDValue LoOpt = combine(Lo.getNode()); 2194193323Sed if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() && 2195193323Sed (!LegalOperations || 2196193323Sed TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType()))) 2197193323Sed return CombineTo(N, LoOpt, LoOpt); 2198193323Sed } 2199193323Sed 2200193323Sed if (HiExists) { 2201193323Sed SDValue Hi = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1), 2202193323Sed N->op_begin(), N->getNumOperands()); 2203193323Sed AddToWorkList(Hi.getNode()); 2204193323Sed SDValue HiOpt = combine(Hi.getNode()); 2205193323Sed if (HiOpt.getNode() && HiOpt != Hi && 2206193323Sed (!LegalOperations || 2207193323Sed TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType()))) 2208193323Sed return CombineTo(N, HiOpt, HiOpt); 2209193323Sed } 2210193323Sed 2211193323Sed return SDValue(); 2212193323Sed} 2213193323Sed 2214193323SedSDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) { 2215193323Sed SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS); 2216193323Sed if (Res.getNode()) return Res; 2217193323Sed 2218218893Sdim EVT VT = N->getValueType(0); 2219218893Sdim DebugLoc DL = N->getDebugLoc(); 2220218893Sdim 2221218893Sdim // If the type twice as wide is legal, transform the mulhu to a wider multiply 2222218893Sdim // plus a shift. 
2223218893Sdim if (VT.isSimple() && !VT.isVector()) { 2224218893Sdim MVT Simple = VT.getSimpleVT(); 2225218893Sdim unsigned SimpleSize = Simple.getSizeInBits(); 2226218893Sdim EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2227218893Sdim if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2228218893Sdim SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0)); 2229218893Sdim SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1)); 2230218893Sdim Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); 2231218893Sdim // Compute the high part as N1. 2232218893Sdim Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, 2233219077Sdim DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType()))); 2234218893Sdim Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); 2235218893Sdim // Compute the low part as N0. 2236218893Sdim Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); 2237218893Sdim return CombineTo(N, Lo, Hi); 2238218893Sdim } 2239218893Sdim } 2240219077Sdim 2241193323Sed return SDValue(); 2242193323Sed} 2243193323Sed 2244193323SedSDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) { 2245193323Sed SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU); 2246193323Sed if (Res.getNode()) return Res; 2247193323Sed 2248218893Sdim EVT VT = N->getValueType(0); 2249218893Sdim DebugLoc DL = N->getDebugLoc(); 2250219077Sdim 2251218893Sdim // If the type twice as wide is legal, transform the mulhu to a wider multiply 2252218893Sdim // plus a shift. 
2253218893Sdim if (VT.isSimple() && !VT.isVector()) { 2254218893Sdim MVT Simple = VT.getSimpleVT(); 2255218893Sdim unsigned SimpleSize = Simple.getSizeInBits(); 2256218893Sdim EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2257218893Sdim if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2258218893Sdim SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0)); 2259218893Sdim SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1)); 2260218893Sdim Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); 2261218893Sdim // Compute the high part as N1. 2262218893Sdim Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, 2263219077Sdim DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType()))); 2264218893Sdim Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); 2265218893Sdim // Compute the low part as N0. 2266218893Sdim Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); 2267218893Sdim return CombineTo(N, Lo, Hi); 2268218893Sdim } 2269218893Sdim } 2270219077Sdim 2271193323Sed return SDValue(); 2272193323Sed} 2273193323Sed 2274223017SdimSDValue DAGCombiner::visitSMULO(SDNode *N) { 2275223017Sdim // (smulo x, 2) -> (saddo x, x) 2276223017Sdim if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) 2277223017Sdim if (C2->getAPIntValue() == 2) 2278223017Sdim return DAG.getNode(ISD::SADDO, N->getDebugLoc(), N->getVTList(), 2279223017Sdim N->getOperand(0), N->getOperand(0)); 2280223017Sdim 2281223017Sdim return SDValue(); 2282223017Sdim} 2283223017Sdim 2284223017SdimSDValue DAGCombiner::visitUMULO(SDNode *N) { 2285223017Sdim // (umulo x, 2) -> (uaddo x, x) 2286223017Sdim if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) 2287223017Sdim if (C2->getAPIntValue() == 2) 2288223017Sdim return DAG.getNode(ISD::UADDO, N->getDebugLoc(), N->getVTList(), 2289223017Sdim N->getOperand(0), N->getOperand(0)); 2290223017Sdim 2291223017Sdim return SDValue(); 2292223017Sdim} 2293223017Sdim 2294193323SedSDValue DAGCombiner::visitSDIVREM(SDNode *N) { 
2295193323Sed SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM); 2296193323Sed if (Res.getNode()) return Res; 2297193323Sed 2298193323Sed return SDValue(); 2299193323Sed} 2300193323Sed 2301193323SedSDValue DAGCombiner::visitUDIVREM(SDNode *N) { 2302193323Sed SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM); 2303193323Sed if (Res.getNode()) return Res; 2304193323Sed 2305193323Sed return SDValue(); 2306193323Sed} 2307193323Sed 2308193323Sed/// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with 2309193323Sed/// two operands of the same opcode, try to simplify it. 2310193323SedSDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { 2311193323Sed SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 2312198090Srdivacky EVT VT = N0.getValueType(); 2313193323Sed assert(N0.getOpcode() == N1.getOpcode() && "Bad input!"); 2314193323Sed 2315202375Srdivacky // Bail early if none of these transforms apply. 2316202375Srdivacky if (N0.getNode()->getNumOperands() == 0) return SDValue(); 2317202375Srdivacky 2318193323Sed // For each of OP in AND/OR/XOR: 2319193323Sed // fold (OP (zext x), (zext y)) -> (zext (OP x, y)) 2320193323Sed // fold (OP (sext x), (sext y)) -> (sext (OP x, y)) 2321193323Sed // fold (OP (aext x), (aext y)) -> (aext (OP x, y)) 2322210299Sed // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free) 2323200581Srdivacky // 2324200581Srdivacky // do not sink logical op inside of a vector extend, since it may combine 2325200581Srdivacky // into a vsetcc. 2326202375Srdivacky EVT Op0VT = N0.getOperand(0).getValueType(); 2327202375Srdivacky if ((N0.getOpcode() == ISD::ZERO_EXTEND || 2328193323Sed N0.getOpcode() == ISD::SIGN_EXTEND || 2329207618Srdivacky // Avoid infinite looping with PromoteIntBinOp. 
2330207618Srdivacky (N0.getOpcode() == ISD::ANY_EXTEND && 2331207618Srdivacky (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) || 2332210299Sed (N0.getOpcode() == ISD::TRUNCATE && 2333210299Sed (!TLI.isZExtFree(VT, Op0VT) || 2334210299Sed !TLI.isTruncateFree(Op0VT, VT)) && 2335210299Sed TLI.isTypeLegal(Op0VT))) && 2336200581Srdivacky !VT.isVector() && 2337202375Srdivacky Op0VT == N1.getOperand(0).getValueType() && 2338202375Srdivacky (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) { 2339193323Sed SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(), 2340193323Sed N0.getOperand(0).getValueType(), 2341193323Sed N0.getOperand(0), N1.getOperand(0)); 2342193323Sed AddToWorkList(ORNode.getNode()); 2343193323Sed return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, ORNode); 2344193323Sed } 2345193323Sed 2346193323Sed // For each of OP in SHL/SRL/SRA/AND... 2347193323Sed // fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z) 2348193323Sed // fold (or (OP x, z), (OP y, z)) -> (OP (or x, y), z) 2349193323Sed // fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z) 2350193323Sed if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL || 2351193323Sed N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) && 2352193323Sed N0.getOperand(1) == N1.getOperand(1)) { 2353193323Sed SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(), 2354193323Sed N0.getOperand(0).getValueType(), 2355193323Sed N0.getOperand(0), N1.getOperand(0)); 2356193323Sed AddToWorkList(ORNode.getNode()); 2357193323Sed return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, 2358193323Sed ORNode, N0.getOperand(1)); 2359193323Sed } 2360193323Sed 2361234353Sdim // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B)) 2362234353Sdim // Only perform this optimization after type legalization and before 2363234353Sdim // LegalizeVectorOprs. LegalizeVectorOprs promotes vector operations by 2364234353Sdim // adding bitcasts. 
For example (xor v4i32) is promoted to (v2i64), and 2365234353Sdim // we don't want to undo this promotion. 2366234353Sdim // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper 2367234353Sdim // on scalars. 2368243830Sdim if ((N0.getOpcode() == ISD::BITCAST || 2369243830Sdim N0.getOpcode() == ISD::SCALAR_TO_VECTOR) && 2370243830Sdim Level == AfterLegalizeTypes) { 2371234353Sdim SDValue In0 = N0.getOperand(0); 2372234353Sdim SDValue In1 = N1.getOperand(0); 2373234353Sdim EVT In0Ty = In0.getValueType(); 2374234353Sdim EVT In1Ty = In1.getValueType(); 2375243830Sdim DebugLoc DL = N->getDebugLoc(); 2376243830Sdim // If both incoming values are integers, and the original types are the 2377243830Sdim // same. 2378234353Sdim if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) { 2379243830Sdim SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1); 2380243830Sdim SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op); 2381234353Sdim AddToWorkList(Op.getNode()); 2382234353Sdim return BC; 2383234353Sdim } 2384234353Sdim } 2385234353Sdim 2386234353Sdim // Xor/and/or are indifferent to the swizzle operation (shuffle of one value). 2387234353Sdim // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B)) 2388234353Sdim // If both shuffles use the same mask, and both shuffle within a single 2389234353Sdim // vector, then it is worthwhile to move the swizzle after the operation. 2390234353Sdim // The type-legalizer generates this pattern when loading illegal 2391234353Sdim // vector types from memory. In many cases this allows additional shuffle 2392234353Sdim // optimizations. 
2393234353Sdim if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG && 2394234353Sdim N0.getOperand(1).getOpcode() == ISD::UNDEF && 2395234353Sdim N1.getOperand(1).getOpcode() == ISD::UNDEF) { 2396234353Sdim ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0); 2397234353Sdim ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1); 2398234353Sdim 2399234353Sdim assert(N0.getOperand(0).getValueType() == N1.getOperand(1).getValueType() && 2400234353Sdim "Inputs to shuffles are not the same type"); 2401234353Sdim 2402234353Sdim unsigned NumElts = VT.getVectorNumElements(); 2403234353Sdim 2404234353Sdim // Check that both shuffles use the same mask. The masks are known to be of 2405234353Sdim // the same length because the result vector type is the same. 2406234353Sdim bool SameMask = true; 2407234353Sdim for (unsigned i = 0; i != NumElts; ++i) { 2408234353Sdim int Idx0 = SVN0->getMaskElt(i); 2409234353Sdim int Idx1 = SVN1->getMaskElt(i); 2410234353Sdim if (Idx0 != Idx1) { 2411234353Sdim SameMask = false; 2412234353Sdim break; 2413234353Sdim } 2414234353Sdim } 2415234353Sdim 2416234353Sdim if (SameMask) { 2417234353Sdim SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT, 2418234353Sdim N0.getOperand(0), N1.getOperand(0)); 2419234353Sdim AddToWorkList(Op.getNode()); 2420234353Sdim return DAG.getVectorShuffle(VT, N->getDebugLoc(), Op, 2421234353Sdim DAG.getUNDEF(VT), &SVN0->getMask()[0]); 2422234353Sdim } 2423234353Sdim } 2424234353Sdim 2425193323Sed return SDValue(); 2426193323Sed} 2427193323Sed 2428193323SedSDValue DAGCombiner::visitAND(SDNode *N) { 2429193323Sed SDValue N0 = N->getOperand(0); 2430193323Sed SDValue N1 = N->getOperand(1); 2431193323Sed SDValue LL, LR, RL, RR, CC0, CC1; 2432193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 2433193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2434198090Srdivacky EVT VT = N1.getValueType(); 2435204792Srdivacky unsigned BitWidth = 
VT.getScalarType().getSizeInBits(); 2436193323Sed 2437193323Sed // fold vector ops 2438193323Sed if (VT.isVector()) { 2439193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 2440193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 2441249423Sdim 2442249423Sdim // fold (and x, 0) -> 0, vector edition 2443249423Sdim if (ISD::isBuildVectorAllZeros(N0.getNode())) 2444249423Sdim return N0; 2445249423Sdim if (ISD::isBuildVectorAllZeros(N1.getNode())) 2446249423Sdim return N1; 2447249423Sdim 2448249423Sdim // fold (and x, -1) -> x, vector edition 2449249423Sdim if (ISD::isBuildVectorAllOnes(N0.getNode())) 2450249423Sdim return N1; 2451249423Sdim if (ISD::isBuildVectorAllOnes(N1.getNode())) 2452249423Sdim return N0; 2453193323Sed } 2454193323Sed 2455193323Sed // fold (and x, undef) -> 0 2456193323Sed if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 2457193323Sed return DAG.getConstant(0, VT); 2458193323Sed // fold (and c1, c2) -> c1&c2 2459193323Sed if (N0C && N1C) 2460193323Sed return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C); 2461193323Sed // canonicalize constant to RHS 2462193323Sed if (N0C && !N1C) 2463193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N1, N0); 2464193323Sed // fold (and x, -1) -> x 2465193323Sed if (N1C && N1C->isAllOnesValue()) 2466193323Sed return N0; 2467193323Sed // if (and x, c) is known to be zero, return 0 2468193323Sed if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 2469193323Sed APInt::getAllOnesValue(BitWidth))) 2470193323Sed return DAG.getConstant(0, VT); 2471193323Sed // reassociate and 2472193323Sed SDValue RAND = ReassociateOps(ISD::AND, N->getDebugLoc(), N0, N1); 2473193323Sed if (RAND.getNode() != 0) 2474193323Sed return RAND; 2475204642Srdivacky // fold (and (or x, C), D) -> D if (C & D) == D 2476193323Sed if (N1C && N0.getOpcode() == ISD::OR) 2477193323Sed if (ConstantSDNode *ORI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 2478193323Sed if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == 
N1C->getAPIntValue()) 2479193323Sed return N1; 2480193323Sed // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. 2481193323Sed if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 2482193323Sed SDValue N0Op0 = N0.getOperand(0); 2483193323Sed APInt Mask = ~N1C->getAPIntValue(); 2484218893Sdim Mask = Mask.trunc(N0Op0.getValueSizeInBits()); 2485193323Sed if (DAG.MaskedValueIsZero(N0Op0, Mask)) { 2486193323Sed SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), 2487193323Sed N0.getValueType(), N0Op0); 2488193323Sed 2489193323Sed // Replace uses of the AND with uses of the Zero extend node. 2490193323Sed CombineTo(N, Zext); 2491193323Sed 2492193323Sed // We actually want to replace all uses of the any_extend with the 2493193323Sed // zero_extend, to avoid duplicating things. This will later cause this 2494193323Sed // AND to be folded. 2495193323Sed CombineTo(N0.getNode(), Zext); 2496193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 2497193323Sed } 2498193323Sed } 2499234353Sdim // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> 2500234353Sdim // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must 2501234353Sdim // already be zero by virtue of the width of the base type of the load. 2502234353Sdim // 2503234353Sdim // the 'X' node here can either be nothing or an extract_vector_elt to catch 2504234353Sdim // more cases. 2505234353Sdim if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 2506234353Sdim N0.getOperand(0).getOpcode() == ISD::LOAD) || 2507234353Sdim N0.getOpcode() == ISD::LOAD) { 2508234353Sdim LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? 2509234353Sdim N0 : N0.getOperand(0) ); 2510234353Sdim 2511234353Sdim // Get the constant (if applicable) the zero'th operand is being ANDed with. 2512234353Sdim // This can be a pure constant or a vector splat, in which case we treat the 2513234353Sdim // vector as a scalar and use the splat value. 
2514234353Sdim APInt Constant = APInt::getNullValue(1); 2515234353Sdim if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 2516234353Sdim Constant = C->getAPIntValue(); 2517234353Sdim } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { 2518234353Sdim APInt SplatValue, SplatUndef; 2519234353Sdim unsigned SplatBitSize; 2520234353Sdim bool HasAnyUndefs; 2521234353Sdim bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef, 2522234353Sdim SplatBitSize, HasAnyUndefs); 2523234353Sdim if (IsSplat) { 2524234353Sdim // Undef bits can contribute to a possible optimisation if set, so 2525234353Sdim // set them. 2526234353Sdim SplatValue |= SplatUndef; 2527234353Sdim 2528234353Sdim // The splat value may be something like "0x00FFFFFF", which means 0 for 2529234353Sdim // the first vector value and FF for the rest, repeating. We need a mask 2530234353Sdim // that will apply equally to all members of the vector, so AND all the 2531234353Sdim // lanes of the constant together. 2532234353Sdim EVT VT = Vector->getValueType(0); 2533234353Sdim unsigned BitWidth = VT.getVectorElementType().getSizeInBits(); 2534243830Sdim 2535243830Sdim // If the splat value has been compressed to a bitlength lower 2536243830Sdim // than the size of the vector lane, we need to re-expand it to 2537243830Sdim // the lane size. 
2538243830Sdim if (BitWidth > SplatBitSize) 2539243830Sdim for (SplatValue = SplatValue.zextOrTrunc(BitWidth); 2540243830Sdim SplatBitSize < BitWidth; 2541243830Sdim SplatBitSize = SplatBitSize * 2) 2542243830Sdim SplatValue |= SplatValue.shl(SplatBitSize); 2543243830Sdim 2544234353Sdim Constant = APInt::getAllOnesValue(BitWidth); 2545243830Sdim for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i) 2546234353Sdim Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); 2547234353Sdim } 2548234353Sdim } 2549234353Sdim 2550234353Sdim // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is 2551234353Sdim // actually legal and isn't going to get expanded, else this is a false 2552234353Sdim // optimisation. 2553234353Sdim bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, 2554234353Sdim Load->getMemoryVT()); 2555234353Sdim 2556234353Sdim // Resize the constant to the same size as the original memory access before 2557234353Sdim // extension. If it is still the AllOnesValue then this AND is completely 2558234353Sdim // unneeded. 2559234353Sdim Constant = 2560234353Sdim Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits()); 2561234353Sdim 2562234353Sdim bool B; 2563234353Sdim switch (Load->getExtensionType()) { 2564234353Sdim default: B = false; break; 2565234353Sdim case ISD::EXTLOAD: B = CanZextLoadProfitably; break; 2566234353Sdim case ISD::ZEXTLOAD: 2567234353Sdim case ISD::NON_EXTLOAD: B = true; break; 2568234353Sdim } 2569234353Sdim 2570234353Sdim if (B && Constant.isAllOnesValue()) { 2571234353Sdim // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to 2572234353Sdim // preserve semantics once we get rid of the AND. 
2573234353Sdim SDValue NewLoad(Load, 0); 2574234353Sdim if (Load->getExtensionType() == ISD::EXTLOAD) { 2575234353Sdim NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, 2576234353Sdim Load->getValueType(0), Load->getDebugLoc(), 2577234353Sdim Load->getChain(), Load->getBasePtr(), 2578234353Sdim Load->getOffset(), Load->getMemoryVT(), 2579234353Sdim Load->getMemOperand()); 2580234353Sdim // Replace uses of the EXTLOAD with the new ZEXTLOAD. 2581239462Sdim if (Load->getNumValues() == 3) { 2582239462Sdim // PRE/POST_INC loads have 3 values. 2583239462Sdim SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), 2584239462Sdim NewLoad.getValue(2) }; 2585239462Sdim CombineTo(Load, To, 3, true); 2586239462Sdim } else { 2587239462Sdim CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); 2588239462Sdim } 2589234353Sdim } 2590234353Sdim 2591234353Sdim // Fold the AND away, taking care not to fold to the old load node if we 2592234353Sdim // replaced it. 2593234353Sdim CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); 2594234353Sdim 2595234353Sdim return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2596234353Sdim } 2597234353Sdim } 2598193323Sed // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) 2599193323Sed if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){ 2600193323Sed ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get(); 2601193323Sed ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); 2602193323Sed 2603193323Sed if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && 2604193323Sed LL.getValueType().isInteger()) { 2605193323Sed // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0) 2606193323Sed if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) { 2607193323Sed SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(), 2608193323Sed LR.getValueType(), LL, RL); 2609193323Sed AddToWorkList(ORNode.getNode()); 2610193323Sed return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1); 2611193323Sed } 2612193323Sed // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1) 2613193323Sed if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) { 2614193323Sed SDValue ANDNode = DAG.getNode(ISD::AND, N0.getDebugLoc(), 2615193323Sed LR.getValueType(), LL, RL); 2616193323Sed AddToWorkList(ANDNode.getNode()); 2617193323Sed return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1); 2618193323Sed } 2619193323Sed // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1) 2620193323Sed if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) { 2621193323Sed SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(), 2622193323Sed LR.getValueType(), LL, RL); 2623193323Sed AddToWorkList(ORNode.getNode()); 2624193323Sed return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1); 2625193323Sed } 2626193323Sed } 2627193323Sed // canonicalize equivalent to ll == rl 2628193323Sed if (LL == RR && LR == RL) { 2629193323Sed Op1 = ISD::getSetCCSwappedOperands(Op1); 2630193323Sed std::swap(RL, RR); 2631193323Sed } 2632193323Sed if (LL == RL && LR == RR) { 2633193323Sed bool isInteger 
= LL.getValueType().isInteger(); 2634193323Sed ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger); 2635193323Sed if (Result != ISD::SETCC_INVALID && 2636249423Sdim (!LegalOperations || 2637249423Sdim (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) && 2638249423Sdim TLI.isOperationLegal(ISD::SETCC, 2639249423Sdim TLI.getSetCCResultType(N0.getSimpleValueType()))))) 2640193323Sed return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(), 2641193323Sed LL, LR, Result); 2642193323Sed } 2643193323Sed } 2644193323Sed 2645193323Sed // Simplify: (and (op x...), (op y...)) -> (op (and x, y)) 2646193323Sed if (N0.getOpcode() == N1.getOpcode()) { 2647193323Sed SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); 2648193323Sed if (Tmp.getNode()) return Tmp; 2649193323Sed } 2650193323Sed 2651193323Sed // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) 2652193323Sed // fold (and (sra)) -> (and (srl)) when possible. 2653193323Sed if (!VT.isVector() && 2654193323Sed SimplifyDemandedBits(SDValue(N, 0))) 2655193323Sed return SDValue(N, 0); 2656202375Srdivacky 2657193323Sed // fold (zext_inreg (extload x)) -> (zextload x) 2658193323Sed if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { 2659193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 2660198090Srdivacky EVT MemVT = LN0->getMemoryVT(); 2661193323Sed // If we zero all the possible extended bits, then we can turn this into 2662193323Sed // a zextload if we are running before legalize or the operation is legal. 
2663204792Srdivacky unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits(); 2664193323Sed if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 2665204792Srdivacky BitWidth - MemVT.getScalarType().getSizeInBits())) && 2666193323Sed ((!LegalOperations && !LN0->isVolatile()) || 2667198090Srdivacky TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) { 2668218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT, 2669193323Sed LN0->getChain(), LN0->getBasePtr(), 2670218893Sdim LN0->getPointerInfo(), MemVT, 2671203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 2672203954Srdivacky LN0->getAlignment()); 2673193323Sed AddToWorkList(N); 2674193323Sed CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 2675193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 2676193323Sed } 2677193323Sed } 2678193323Sed // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use 2679193323Sed if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 2680193323Sed N0.hasOneUse()) { 2681193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 2682198090Srdivacky EVT MemVT = LN0->getMemoryVT(); 2683193323Sed // If we zero all the possible extended bits, then we can turn this into 2684193323Sed // a zextload if we are running before legalize or the operation is legal. 
2685204792Srdivacky unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits(); 2686193323Sed if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 2687204792Srdivacky BitWidth - MemVT.getScalarType().getSizeInBits())) && 2688193323Sed ((!LegalOperations && !LN0->isVolatile()) || 2689198090Srdivacky TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) { 2690218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT, 2691193323Sed LN0->getChain(), 2692218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 2693218893Sdim MemVT, 2694203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 2695203954Srdivacky LN0->getAlignment()); 2696193323Sed AddToWorkList(N); 2697193323Sed CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 2698193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 2699193323Sed } 2700193323Sed } 2701193323Sed 2702193323Sed // fold (and (load x), 255) -> (zextload x, i8) 2703193323Sed // fold (and (extload x, i16), 255) -> (zextload x, i8) 2704202375Srdivacky // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8) 2705202375Srdivacky if (N1C && (N0.getOpcode() == ISD::LOAD || 2706202375Srdivacky (N0.getOpcode() == ISD::ANY_EXTEND && 2707202375Srdivacky N0.getOperand(0).getOpcode() == ISD::LOAD))) { 2708202375Srdivacky bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND; 2709202375Srdivacky LoadSDNode *LN0 = HasAnyExt 2710202375Srdivacky ? 
cast<LoadSDNode>(N0.getOperand(0)) 2711202375Srdivacky : cast<LoadSDNode>(N0); 2712193323Sed if (LN0->getExtensionType() != ISD::SEXTLOAD && 2713202375Srdivacky LN0->isUnindexed() && N0.hasOneUse() && LN0->hasOneUse()) { 2714193323Sed uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits(); 2715202375Srdivacky if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){ 2716202375Srdivacky EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); 2717202375Srdivacky EVT LoadedVT = LN0->getMemoryVT(); 2718193323Sed 2719202375Srdivacky if (ExtVT == LoadedVT && 2720202375Srdivacky (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { 2721202375Srdivacky EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; 2722218893Sdim 2723218893Sdim SDValue NewLoad = 2724218893Sdim DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy, 2725202375Srdivacky LN0->getChain(), LN0->getBasePtr(), 2726218893Sdim LN0->getPointerInfo(), 2727203954Srdivacky ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 2728203954Srdivacky LN0->getAlignment()); 2729202375Srdivacky AddToWorkList(N); 2730202375Srdivacky CombineTo(LN0, NewLoad, NewLoad.getValue(1)); 2731202375Srdivacky return SDValue(N, 0); // Return N so it doesn't get rechecked! 2732202375Srdivacky } 2733218893Sdim 2734202375Srdivacky // Do not change the width of a volatile load. 2735202375Srdivacky // Do not generate loads of non-round integer types since these can 2736202375Srdivacky // be expensive (and would be wrong if the type is not byte sized). 
2737202375Srdivacky if (!LN0->isVolatile() && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() && 2738202375Srdivacky (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { 2739202375Srdivacky EVT PtrType = LN0->getOperand(1).getValueType(); 2740193323Sed 2741202375Srdivacky unsigned Alignment = LN0->getAlignment(); 2742202375Srdivacky SDValue NewPtr = LN0->getBasePtr(); 2743193323Sed 2744202375Srdivacky // For big endian targets, we need to add an offset to the pointer 2745202375Srdivacky // to load the correct bytes. For little endian systems, we merely 2746202375Srdivacky // need to read fewer bytes from the same pointer. 2747202375Srdivacky if (TLI.isBigEndian()) { 2748202375Srdivacky unsigned LVTStoreBytes = LoadedVT.getStoreSize(); 2749202375Srdivacky unsigned EVTStoreBytes = ExtVT.getStoreSize(); 2750202375Srdivacky unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; 2751202375Srdivacky NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), PtrType, 2752202375Srdivacky NewPtr, DAG.getConstant(PtrOff, PtrType)); 2753202375Srdivacky Alignment = MinAlign(Alignment, PtrOff); 2754202375Srdivacky } 2755193323Sed 2756202375Srdivacky AddToWorkList(NewPtr.getNode()); 2757218893Sdim 2758202375Srdivacky EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; 2759202375Srdivacky SDValue Load = 2760218893Sdim DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy, 2761202375Srdivacky LN0->getChain(), NewPtr, 2762218893Sdim LN0->getPointerInfo(), 2763203954Srdivacky ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 2764203954Srdivacky Alignment); 2765202375Srdivacky AddToWorkList(N); 2766202375Srdivacky CombineTo(LN0, Load, Load.getValue(1)); 2767202375Srdivacky return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2768193323Sed } 2769193323Sed } 2770193323Sed } 2771193323Sed } 2772193323Sed 2773239462Sdim if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && 2774239462Sdim VT.getSizeInBits() <= 64) { 2775239462Sdim if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2776239462Sdim APInt ADDC = ADDI->getAPIntValue(); 2777239462Sdim if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 2778239462Sdim // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal 2779239462Sdim // immediate for an add, but it is legal if its top c2 bits are set, 2780239462Sdim // transform the ADD so the immediate doesn't need to be materialized 2781239462Sdim // in a register. 2782239462Sdim if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { 2783239462Sdim APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 2784239462Sdim SRLI->getZExtValue()); 2785239462Sdim if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { 2786239462Sdim ADDC |= Mask; 2787239462Sdim if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 2788239462Sdim SDValue NewAdd = 2789239462Sdim DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, 2790239462Sdim N0.getOperand(0), DAG.getConstant(ADDC, VT)); 2791239462Sdim CombineTo(N0.getNode(), NewAdd); 2792239462Sdim return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2793239462Sdim } 2794239462Sdim } 2795239462Sdim } 2796239462Sdim } 2797239462Sdim } 2798239462Sdim } 2799239462Sdim 2800193323Sed return SDValue(); 2801193323Sed} 2802193323Sed 2803224145Sdim/// MatchBSwapHWord - Match (a >> 8) | (a << 8) as (bswap a) >> 16 2804224145Sdim/// 2805224145SdimSDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1, 2806224145Sdim bool DemandHighBits) { 2807224145Sdim if (!LegalOperations) 2808224145Sdim return SDValue(); 2809224145Sdim 2810224145Sdim EVT VT = N->getValueType(0); 2811224145Sdim if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16) 2812224145Sdim return SDValue(); 2813224145Sdim if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 2814224145Sdim return SDValue(); 2815224145Sdim 2816224145Sdim // Recognize (and (shl a, 8), 0xff), (and (srl a, 8), 0xff00) 2817224145Sdim bool LookPassAnd0 = false; 2818224145Sdim bool LookPassAnd1 = false; 2819224145Sdim if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL) 2820224145Sdim std::swap(N0, N1); 2821224145Sdim if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL) 2822224145Sdim std::swap(N0, N1); 2823224145Sdim if (N0.getOpcode() == ISD::AND) { 2824224145Sdim if (!N0.getNode()->hasOneUse()) 2825224145Sdim return SDValue(); 2826224145Sdim ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2827224145Sdim if (!N01C || N01C->getZExtValue() != 0xFF00) 2828224145Sdim return SDValue(); 2829224145Sdim N0 = N0.getOperand(0); 2830224145Sdim LookPassAnd0 = true; 2831224145Sdim } 2832224145Sdim 2833224145Sdim if (N1.getOpcode() == ISD::AND) { 2834224145Sdim if (!N1.getNode()->hasOneUse()) 2835224145Sdim return SDValue(); 2836224145Sdim ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 2837224145Sdim if (!N11C || N11C->getZExtValue() != 0xFF) 2838224145Sdim return SDValue(); 2839224145Sdim N1 = N1.getOperand(0); 2840224145Sdim LookPassAnd1 = true; 2841224145Sdim } 2842224145Sdim 2843224145Sdim if 
(N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 2844224145Sdim std::swap(N0, N1); 2845224145Sdim if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 2846224145Sdim return SDValue(); 2847224145Sdim if (!N0.getNode()->hasOneUse() || 2848224145Sdim !N1.getNode()->hasOneUse()) 2849224145Sdim return SDValue(); 2850224145Sdim 2851224145Sdim ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2852224145Sdim ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 2853224145Sdim if (!N01C || !N11C) 2854224145Sdim return SDValue(); 2855224145Sdim if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8) 2856224145Sdim return SDValue(); 2857224145Sdim 2858224145Sdim // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8) 2859224145Sdim SDValue N00 = N0->getOperand(0); 2860224145Sdim if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) { 2861224145Sdim if (!N00.getNode()->hasOneUse()) 2862224145Sdim return SDValue(); 2863224145Sdim ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1)); 2864224145Sdim if (!N001C || N001C->getZExtValue() != 0xFF) 2865224145Sdim return SDValue(); 2866224145Sdim N00 = N00.getOperand(0); 2867224145Sdim LookPassAnd0 = true; 2868224145Sdim } 2869224145Sdim 2870224145Sdim SDValue N10 = N1->getOperand(0); 2871224145Sdim if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) { 2872224145Sdim if (!N10.getNode()->hasOneUse()) 2873224145Sdim return SDValue(); 2874224145Sdim ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1)); 2875224145Sdim if (!N101C || N101C->getZExtValue() != 0xFF00) 2876224145Sdim return SDValue(); 2877224145Sdim N10 = N10.getOperand(0); 2878224145Sdim LookPassAnd1 = true; 2879224145Sdim } 2880224145Sdim 2881224145Sdim if (N00 != N10) 2882224145Sdim return SDValue(); 2883224145Sdim 2884224145Sdim // Make sure everything beyond the low halfword is zero since the SRL 16 2885224145Sdim // will clear the top bits. 
2886224145Sdim unsigned OpSizeInBits = VT.getSizeInBits(); 2887224145Sdim if (DemandHighBits && OpSizeInBits > 16 && 2888224145Sdim (!LookPassAnd0 || !LookPassAnd1) && 2889224145Sdim !DAG.MaskedValueIsZero(N10, APInt::getHighBitsSet(OpSizeInBits, 16))) 2890224145Sdim return SDValue(); 2891224145Sdim 2892224145Sdim SDValue Res = DAG.getNode(ISD::BSWAP, N->getDebugLoc(), VT, N00); 2893224145Sdim if (OpSizeInBits > 16) 2894224145Sdim Res = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Res, 2895224145Sdim DAG.getConstant(OpSizeInBits-16, getShiftAmountTy(VT))); 2896224145Sdim return Res; 2897224145Sdim} 2898224145Sdim 2899224145Sdim/// isBSwapHWordElement - Return true if the specified node is an element 2900224145Sdim/// that makes up a 32-bit packed halfword byteswap. i.e. 2901224145Sdim/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8) 2902224145Sdimstatic bool isBSwapHWordElement(SDValue N, SmallVector<SDNode*,4> &Parts) { 2903224145Sdim if (!N.getNode()->hasOneUse()) 2904224145Sdim return false; 2905224145Sdim 2906224145Sdim unsigned Opc = N.getOpcode(); 2907224145Sdim if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL) 2908224145Sdim return false; 2909224145Sdim 2910224145Sdim ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 2911224145Sdim if (!N1C) 2912224145Sdim return false; 2913224145Sdim 2914224145Sdim unsigned Num; 2915224145Sdim switch (N1C->getZExtValue()) { 2916224145Sdim default: 2917224145Sdim return false; 2918224145Sdim case 0xFF: Num = 0; break; 2919224145Sdim case 0xFF00: Num = 1; break; 2920224145Sdim case 0xFF0000: Num = 2; break; 2921224145Sdim case 0xFF000000: Num = 3; break; 2922224145Sdim } 2923224145Sdim 2924224145Sdim // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00). 
2925224145Sdim SDValue N0 = N.getOperand(0); 2926224145Sdim if (Opc == ISD::AND) { 2927224145Sdim if (Num == 0 || Num == 2) { 2928224145Sdim // (x >> 8) & 0xff 2929224145Sdim // (x >> 8) & 0xff0000 2930224145Sdim if (N0.getOpcode() != ISD::SRL) 2931224145Sdim return false; 2932224145Sdim ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2933224145Sdim if (!C || C->getZExtValue() != 8) 2934224145Sdim return false; 2935224145Sdim } else { 2936224145Sdim // (x << 8) & 0xff00 2937224145Sdim // (x << 8) & 0xff000000 2938224145Sdim if (N0.getOpcode() != ISD::SHL) 2939224145Sdim return false; 2940224145Sdim ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2941224145Sdim if (!C || C->getZExtValue() != 8) 2942224145Sdim return false; 2943224145Sdim } 2944224145Sdim } else if (Opc == ISD::SHL) { 2945224145Sdim // (x & 0xff) << 8 2946224145Sdim // (x & 0xff0000) << 8 2947224145Sdim if (Num != 0 && Num != 2) 2948224145Sdim return false; 2949224145Sdim ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 2950224145Sdim if (!C || C->getZExtValue() != 8) 2951224145Sdim return false; 2952224145Sdim } else { // Opc == ISD::SRL 2953224145Sdim // (x & 0xff00) >> 8 2954224145Sdim // (x & 0xff000000) >> 8 2955224145Sdim if (Num != 1 && Num != 3) 2956224145Sdim return false; 2957224145Sdim ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 2958224145Sdim if (!C || C->getZExtValue() != 8) 2959224145Sdim return false; 2960224145Sdim } 2961224145Sdim 2962224145Sdim if (Parts[Num]) 2963224145Sdim return false; 2964224145Sdim 2965224145Sdim Parts[Num] = N0.getOperand(0).getNode(); 2966224145Sdim return true; 2967224145Sdim} 2968224145Sdim 2969224145Sdim/// MatchBSwapHWord - Match a 32-bit packed halfword bswap. 
That is 2970224145Sdim/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8) 2971224145Sdim/// => (rotl (bswap x), 16) 2972224145SdimSDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) { 2973224145Sdim if (!LegalOperations) 2974224145Sdim return SDValue(); 2975224145Sdim 2976224145Sdim EVT VT = N->getValueType(0); 2977224145Sdim if (VT != MVT::i32) 2978224145Sdim return SDValue(); 2979224145Sdim if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 2980224145Sdim return SDValue(); 2981224145Sdim 2982224145Sdim SmallVector<SDNode*,4> Parts(4, (SDNode*)0); 2983224145Sdim // Look for either 2984224145Sdim // (or (or (and), (and)), (or (and), (and))) 2985224145Sdim // (or (or (or (and), (and)), (and)), (and)) 2986224145Sdim if (N0.getOpcode() != ISD::OR) 2987224145Sdim return SDValue(); 2988224145Sdim SDValue N00 = N0.getOperand(0); 2989224145Sdim SDValue N01 = N0.getOperand(1); 2990224145Sdim 2991249423Sdim if (N1.getOpcode() == ISD::OR && 2992249423Sdim N00.getNumOperands() == 2 && N01.getNumOperands() == 2) { 2993224145Sdim // (or (or (and), (and)), (or (and), (and))) 2994224145Sdim SDValue N000 = N00.getOperand(0); 2995224145Sdim if (!isBSwapHWordElement(N000, Parts)) 2996224145Sdim return SDValue(); 2997224145Sdim 2998224145Sdim SDValue N001 = N00.getOperand(1); 2999224145Sdim if (!isBSwapHWordElement(N001, Parts)) 3000224145Sdim return SDValue(); 3001224145Sdim SDValue N010 = N01.getOperand(0); 3002224145Sdim if (!isBSwapHWordElement(N010, Parts)) 3003224145Sdim return SDValue(); 3004224145Sdim SDValue N011 = N01.getOperand(1); 3005224145Sdim if (!isBSwapHWordElement(N011, Parts)) 3006224145Sdim return SDValue(); 3007224145Sdim } else { 3008224145Sdim // (or (or (or (and), (and)), (and)), (and)) 3009224145Sdim if (!isBSwapHWordElement(N1, Parts)) 3010224145Sdim return SDValue(); 3011224145Sdim if (!isBSwapHWordElement(N01, Parts)) 3012224145Sdim return SDValue(); 3013224145Sdim if (N00.getOpcode() != ISD::OR) 3014224145Sdim return 
SDValue(); 3015224145Sdim SDValue N000 = N00.getOperand(0); 3016224145Sdim if (!isBSwapHWordElement(N000, Parts)) 3017224145Sdim return SDValue(); 3018224145Sdim SDValue N001 = N00.getOperand(1); 3019224145Sdim if (!isBSwapHWordElement(N001, Parts)) 3020224145Sdim return SDValue(); 3021224145Sdim } 3022224145Sdim 3023224145Sdim // Make sure the parts are all coming from the same node. 3024224145Sdim if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3]) 3025224145Sdim return SDValue(); 3026224145Sdim 3027224145Sdim SDValue BSwap = DAG.getNode(ISD::BSWAP, N->getDebugLoc(), VT, 3028224145Sdim SDValue(Parts[0],0)); 3029224145Sdim 3030224145Sdim // Result of the bswap should be rotated by 16. If it's not legal, than 3031224145Sdim // do (x << 16) | (x >> 16). 3032224145Sdim SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT)); 3033224145Sdim if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT)) 3034224145Sdim return DAG.getNode(ISD::ROTL, N->getDebugLoc(), VT, BSwap, ShAmt); 3035243830Sdim if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) 3036224145Sdim return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, BSwap, ShAmt); 3037224145Sdim return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, 3038224145Sdim DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, BSwap, ShAmt), 3039224145Sdim DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, BSwap, ShAmt)); 3040224145Sdim} 3041224145Sdim 3042193323SedSDValue DAGCombiner::visitOR(SDNode *N) { 3043193323Sed SDValue N0 = N->getOperand(0); 3044193323Sed SDValue N1 = N->getOperand(1); 3045193323Sed SDValue LL, LR, RL, RR, CC0, CC1; 3046193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 3047193323Sed ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 3048198090Srdivacky EVT VT = N1.getValueType(); 3049193323Sed 3050193323Sed // fold vector ops 3051193323Sed if (VT.isVector()) { 3052193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 3053193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 3054249423Sdim 3055249423Sdim // fold 
(or x, 0) -> x, vector edition 3056249423Sdim if (ISD::isBuildVectorAllZeros(N0.getNode())) 3057249423Sdim return N1; 3058249423Sdim if (ISD::isBuildVectorAllZeros(N1.getNode())) 3059249423Sdim return N0; 3060249423Sdim 3061249423Sdim // fold (or x, -1) -> -1, vector edition 3062249423Sdim if (ISD::isBuildVectorAllOnes(N0.getNode())) 3063249423Sdim return N0; 3064249423Sdim if (ISD::isBuildVectorAllOnes(N1.getNode())) 3065249423Sdim return N1; 3066193323Sed } 3067193323Sed 3068193323Sed // fold (or x, undef) -> -1 3069210299Sed if (!LegalOperations && 3070210299Sed (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) { 3071200581Srdivacky EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT; 3072200581Srdivacky return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT); 3073200581Srdivacky } 3074193323Sed // fold (or c1, c2) -> c1|c2 3075193323Sed if (N0C && N1C) 3076193323Sed return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C); 3077193323Sed // canonicalize constant to RHS 3078193323Sed if (N0C && !N1C) 3079193323Sed return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N1, N0); 3080193323Sed // fold (or x, 0) -> x 3081193323Sed if (N1C && N1C->isNullValue()) 3082193323Sed return N0; 3083193323Sed // fold (or x, -1) -> -1 3084193323Sed if (N1C && N1C->isAllOnesValue()) 3085193323Sed return N1; 3086193323Sed // fold (or x, c) -> c iff (x & ~c) == 0 3087193323Sed if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue())) 3088193323Sed return N1; 3089224145Sdim 3090224145Sdim // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16) 3091224145Sdim SDValue BSwap = MatchBSwapHWord(N, N0, N1); 3092224145Sdim if (BSwap.getNode() != 0) 3093224145Sdim return BSwap; 3094224145Sdim BSwap = MatchBSwapHWordLow(N, N0, N1); 3095224145Sdim if (BSwap.getNode() != 0) 3096224145Sdim return BSwap; 3097224145Sdim 3098193323Sed // reassociate or 3099193323Sed SDValue ROR = ReassociateOps(ISD::OR, N->getDebugLoc(), N0, N1); 3100193323Sed if 
(ROR.getNode() != 0) 3101193323Sed return ROR; 3102193323Sed // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2) 3103204642Srdivacky // iff (c1 & c2) == 0. 3104193323Sed if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() && 3105193323Sed isa<ConstantSDNode>(N0.getOperand(1))) { 3106193323Sed ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1)); 3107204642Srdivacky if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) 3108204642Srdivacky return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 3109204642Srdivacky DAG.getNode(ISD::OR, N0.getDebugLoc(), VT, 3110204642Srdivacky N0.getOperand(0), N1), 3111204642Srdivacky DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1)); 3112193323Sed } 3113193323Sed // fold (or (setcc x), (setcc y)) -> (setcc (or x, y)) 3114193323Sed if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){ 3115193323Sed ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get(); 3116193323Sed ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); 3117193323Sed 3118193323Sed if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && 3119193323Sed LL.getValueType().isInteger()) { 3120193323Sed // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0) 3121193323Sed // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0) 3122193323Sed if (cast<ConstantSDNode>(LR)->isNullValue() && 3123193323Sed (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) { 3124193323Sed SDValue ORNode = DAG.getNode(ISD::OR, LR.getDebugLoc(), 3125193323Sed LR.getValueType(), LL, RL); 3126193323Sed AddToWorkList(ORNode.getNode()); 3127193323Sed return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1); 3128193323Sed } 3129193323Sed // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1) 3130193323Sed // fold (or (setgt X, -1), (setgt Y -1)) -> (setgt (and X, Y), -1) 3131193323Sed if (cast<ConstantSDNode>(LR)->isAllOnesValue() && 3132193323Sed (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) { 3133193323Sed SDValue ANDNode = 
DAG.getNode(ISD::AND, LR.getDebugLoc(), 3134193323Sed LR.getValueType(), LL, RL); 3135193323Sed AddToWorkList(ANDNode.getNode()); 3136193323Sed return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1); 3137193323Sed } 3138193323Sed } 3139193323Sed // canonicalize equivalent to ll == rl 3140193323Sed if (LL == RR && LR == RL) { 3141193323Sed Op1 = ISD::getSetCCSwappedOperands(Op1); 3142193323Sed std::swap(RL, RR); 3143193323Sed } 3144193323Sed if (LL == RL && LR == RR) { 3145193323Sed bool isInteger = LL.getValueType().isInteger(); 3146193323Sed ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger); 3147193323Sed if (Result != ISD::SETCC_INVALID && 3148249423Sdim (!LegalOperations || 3149249423Sdim (TLI.isCondCodeLegal(Result, LL.getSimpleValueType()) && 3150249423Sdim TLI.isOperationLegal(ISD::SETCC, 3151249423Sdim TLI.getSetCCResultType(N0.getValueType()))))) 3152193323Sed return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(), 3153193323Sed LL, LR, Result); 3154193323Sed } 3155193323Sed } 3156193323Sed 3157193323Sed // Simplify: (or (op x...), (op y...)) -> (op (or x, y)) 3158193323Sed if (N0.getOpcode() == N1.getOpcode()) { 3159193323Sed SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); 3160193323Sed if (Tmp.getNode()) return Tmp; 3161193323Sed } 3162193323Sed 3163193323Sed // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible. 3164193323Sed if (N0.getOpcode() == ISD::AND && 3165193323Sed N1.getOpcode() == ISD::AND && 3166193323Sed N0.getOperand(1).getOpcode() == ISD::Constant && 3167193323Sed N1.getOperand(1).getOpcode() == ISD::Constant && 3168193323Sed // Don't increase # computations. 3169193323Sed (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) { 3170193323Sed // We can only do this xform if we know that bits from X that are set in C2 3171193323Sed // but not in C1 are already zero. Likewise for Y. 
3172193323Sed const APInt &LHSMask = 3173193323Sed cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 3174193323Sed const APInt &RHSMask = 3175193323Sed cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue(); 3176193323Sed 3177193323Sed if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) && 3178193323Sed DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) { 3179193323Sed SDValue X = DAG.getNode(ISD::OR, N0.getDebugLoc(), VT, 3180193323Sed N0.getOperand(0), N1.getOperand(0)); 3181193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, X, 3182193323Sed DAG.getConstant(LHSMask | RHSMask, VT)); 3183193323Sed } 3184193323Sed } 3185193323Sed 3186193323Sed // See if this is some rotate idiom. 3187193323Sed if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc())) 3188193323Sed return SDValue(Rot, 0); 3189193323Sed 3190210299Sed // Simplify the operands using demanded-bits information. 3191210299Sed if (!VT.isVector() && 3192210299Sed SimplifyDemandedBits(SDValue(N, 0))) 3193210299Sed return SDValue(N, 0); 3194210299Sed 3195193323Sed return SDValue(); 3196193323Sed} 3197193323Sed 3198193323Sed/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present. 3199193323Sedstatic bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) { 3200193323Sed if (Op.getOpcode() == ISD::AND) { 3201193323Sed if (isa<ConstantSDNode>(Op.getOperand(1))) { 3202193323Sed Mask = Op.getOperand(1); 3203193323Sed Op = Op.getOperand(0); 3204193323Sed } else { 3205193323Sed return false; 3206193323Sed } 3207193323Sed } 3208193323Sed 3209193323Sed if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) { 3210193323Sed Shift = Op; 3211193323Sed return true; 3212193323Sed } 3213193323Sed 3214193323Sed return false; 3215193323Sed} 3216193323Sed 3217193323Sed// MatchRotate - Handle an 'or' of two operands. 
If this is one of the many 3218193323Sed// idioms for rotate, and if the target supports rotation instructions, generate 3219193323Sed// a rot[lr]. 3220193323SedSDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) { 3221193323Sed // Must be a legal type. Expanded 'n promoted things won't work with rotates. 3222198090Srdivacky EVT VT = LHS.getValueType(); 3223193323Sed if (!TLI.isTypeLegal(VT)) return 0; 3224193323Sed 3225193323Sed // The target must have at least one rotate flavor. 3226193323Sed bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT); 3227193323Sed bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT); 3228193323Sed if (!HasROTL && !HasROTR) return 0; 3229193323Sed 3230193323Sed // Match "(X shl/srl V1) & V2" where V2 may not be present. 3231193323Sed SDValue LHSShift; // The shift. 3232193323Sed SDValue LHSMask; // AND value if any. 3233193323Sed if (!MatchRotateHalf(LHS, LHSShift, LHSMask)) 3234193323Sed return 0; // Not part of a rotate. 3235193323Sed 3236193323Sed SDValue RHSShift; // The shift. 3237193323Sed SDValue RHSMask; // AND value if any. 3238193323Sed if (!MatchRotateHalf(RHS, RHSShift, RHSMask)) 3239193323Sed return 0; // Not part of a rotate. 3240193323Sed 3241193323Sed if (LHSShift.getOperand(0) != RHSShift.getOperand(0)) 3242193323Sed return 0; // Not shifting the same value. 3243193323Sed 3244193323Sed if (LHSShift.getOpcode() == RHSShift.getOpcode()) 3245193323Sed return 0; // Shifts must disagree. 3246193323Sed 3247193323Sed // Canonicalize shl to left side in a shl/srl pair. 
3248193323Sed if (RHSShift.getOpcode() == ISD::SHL) { 3249193323Sed std::swap(LHS, RHS); 3250193323Sed std::swap(LHSShift, RHSShift); 3251193323Sed std::swap(LHSMask , RHSMask ); 3252193323Sed } 3253193323Sed 3254193323Sed unsigned OpSizeInBits = VT.getSizeInBits(); 3255193323Sed SDValue LHSShiftArg = LHSShift.getOperand(0); 3256193323Sed SDValue LHSShiftAmt = LHSShift.getOperand(1); 3257193323Sed SDValue RHSShiftAmt = RHSShift.getOperand(1); 3258193323Sed 3259193323Sed // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) 3260193323Sed // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2) 3261193323Sed if (LHSShiftAmt.getOpcode() == ISD::Constant && 3262193323Sed RHSShiftAmt.getOpcode() == ISD::Constant) { 3263193323Sed uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue(); 3264193323Sed uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue(); 3265193323Sed if ((LShVal + RShVal) != OpSizeInBits) 3266193323Sed return 0; 3267193323Sed 3268243830Sdim SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, 3269243830Sdim LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt); 3270193323Sed 3271193323Sed // If there is an AND of either shifted operand, apply it to the result. 
3272193323Sed if (LHSMask.getNode() || RHSMask.getNode()) { 3273193323Sed APInt Mask = APInt::getAllOnesValue(OpSizeInBits); 3274193323Sed 3275193323Sed if (LHSMask.getNode()) { 3276193323Sed APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal); 3277193323Sed Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits; 3278193323Sed } 3279193323Sed if (RHSMask.getNode()) { 3280193323Sed APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal); 3281193323Sed Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits; 3282193323Sed } 3283193323Sed 3284193323Sed Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT)); 3285193323Sed } 3286193323Sed 3287193323Sed return Rot.getNode(); 3288193323Sed } 3289193323Sed 3290193323Sed // If there is a mask here, and we have a variable shift, we can't be sure 3291193323Sed // that we're masking out the right stuff. 3292193323Sed if (LHSMask.getNode() || RHSMask.getNode()) 3293193323Sed return 0; 3294193323Sed 3295193323Sed // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotl x, y) 3296193323Sed // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotr x, (sub 32, y)) 3297193323Sed if (RHSShiftAmt.getOpcode() == ISD::SUB && 3298193323Sed LHSShiftAmt == RHSShiftAmt.getOperand(1)) { 3299193323Sed if (ConstantSDNode *SUBC = 3300193323Sed dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) { 3301193323Sed if (SUBC->getAPIntValue() == OpSizeInBits) { 3302243830Sdim return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg, 3303243830Sdim HasROTL ? 
LHSShiftAmt : RHSShiftAmt).getNode(); 3304193323Sed } 3305193323Sed } 3306193323Sed } 3307193323Sed 3308193323Sed // fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotr x, y) 3309193323Sed // fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotl x, (sub 32, y)) 3310193323Sed if (LHSShiftAmt.getOpcode() == ISD::SUB && 3311193323Sed RHSShiftAmt == LHSShiftAmt.getOperand(1)) { 3312193323Sed if (ConstantSDNode *SUBC = 3313193323Sed dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) { 3314193323Sed if (SUBC->getAPIntValue() == OpSizeInBits) { 3315243830Sdim return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT, LHSShiftArg, 3316243830Sdim HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode(); 3317193323Sed } 3318193323Sed } 3319193323Sed } 3320193323Sed 3321193323Sed // Look for sign/zext/any-extended or truncate cases: 3322243830Sdim if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 3323243830Sdim LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 3324243830Sdim LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 3325243830Sdim LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && 3326243830Sdim (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 3327243830Sdim RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 3328243830Sdim RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 3329243830Sdim RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { 3330193323Sed SDValue LExtOp0 = LHSShiftAmt.getOperand(0); 3331193323Sed SDValue RExtOp0 = RHSShiftAmt.getOperand(0); 3332193323Sed if (RExtOp0.getOpcode() == ISD::SUB && 3333193323Sed RExtOp0.getOperand(1) == LExtOp0) { 3334193323Sed // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) -> 3335193323Sed // (rotl x, y) 3336193323Sed // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) -> 3337193323Sed // (rotr x, (sub 32, y)) 3338193323Sed if (ConstantSDNode *SUBC = 3339193323Sed dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) { 3340193323Sed if (SUBC->getAPIntValue() == OpSizeInBits) { 3341193323Sed return DAG.getNode(HasROTL ? 
                             ISD::ROTL : ISD::ROTR, DL, VT,
                             LHSShiftArg,
                             HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode();
        }
      }
    } else if (LExtOp0.getOpcode() == ISD::SUB &&
               RExtOp0 == LExtOp0.getOperand(1)) {
      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
      //   (rotr x, y)
      // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) ->
      //   (rotl x, (sub 32, y))
      if (ConstantSDNode *SUBC =
            dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) {
        // Only a (sub bitwidth, y) amount turns a shl/srl pair into a rotate.
        if (SUBC->getAPIntValue() == OpSizeInBits) {
          return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT,
                             LHSShiftArg,
                             HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode();
        }
      }
    }
  }

  return 0;
}

// Combine an ISD::XOR node.  Tries, in order: vector binop simplification,
// undef propagation, constant folding, canonicalizing the constant to the
// RHS, reassociation, inverting a setcc/select_cc condition for xor-with-1,
// De Morgan-style rewrites, merging nested xor constants, the x^x -> 0 fold,
// and demanded-bits simplification.  Returns the replacement value,
// SDValue(N, 0) if N was updated in place, or a null SDValue if no combine
// applied.
SDValue DAGCombiner::visitXOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue LHS, RHS, CC;
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;

    // fold (xor x, 0) -> x, vector edition
    if (ISD::isBuildVectorAllZeros(N0.getNode()))
      return N1;
    if (ISD::isBuildVectorAllZeros(N1.getNode()))
      return N0;
  }

  // fold (xor undef, undef) -> 0. This is a common idiom (misuse).
  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // fold (xor x, undef) -> undef
  if (N0.getOpcode() == ISD::UNDEF)
    return N0;
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;
  // fold (xor c1, c2) -> c1^c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
  // fold (xor x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // reassociate xor
  SDValue RXOR = ReassociateOps(ISD::XOR, N->getDebugLoc(), N0, N1);
  if (RXOR.getNode() != 0)
    return RXOR;

  // fold !(x cc y) -> (x !cc y)
  // Here "xor (setcc), 1" is treated as a logical not of the comparison and
  // is rewritten by inverting the condition code instead.
  if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
    bool isInt = LHS.getValueType().isInteger();
    ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                               isInt);

    // Only do this if the inverted condition is legal (or we are still
    // pre-legalization).
    if (!LegalOperations ||
        TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) {
      switch (N0.getOpcode()) {
      default:
        llvm_unreachable("Unhandled SetCC Equivalent!");
      case ISD::SETCC:
        return DAG.getSetCC(N->getDebugLoc(), VT, LHS, RHS, NotCC);
      case ISD::SELECT_CC:
        return DAG.getSelectCC(N->getDebugLoc(), LHS, RHS, N0.getOperand(2),
                               N0.getOperand(3), NotCC);
      }
    }
  }

  // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
  if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
      N0.getNode()->hasOneUse() &&
      isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
    SDValue V = N0.getOperand(0);
    V = DAG.getNode(ISD::XOR, N0.getDebugLoc(), V.getValueType(), V,
                    DAG.getConstant(1, V.getValueType()));
    AddToWorkList(V.getNode());
    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, V);
  }

  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
  if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
      // De Morgan: the dual opcode with both operands complemented.
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
    }
  }
  // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
  if (N1C && N1C->isAllOnesValue() &&
      (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
    SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
    if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
      unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND;
      LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS
      RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS
      AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode());
      return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS);
    }
  }
  // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
  if (N1C && N0.getOpcode() == ISD::XOR) {
    ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (N00C)
      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(1),
                         DAG.getConstant(N1C->getAPIntValue() ^
                                         N00C->getAPIntValue(), VT));
    if (N01C)
      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(0),
                         DAG.getConstant(N1C->getAPIntValue() ^
                                         N01C->getAPIntValue(), VT));
  }
  // fold (xor x, x) -> 0
  if (N0 == N1)
    return tryFoldToZero(N->getDebugLoc(), TLI, VT, DAG, LegalOperations);

  // Simplify: xor (op x...), (op y...) -> (op (xor x, y))
  if (N0.getOpcode() == N1.getOpcode()) {
    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
    if (Tmp.getNode()) return Tmp;
  }

  // Simplify the expression using non-local knowledge.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// visitShiftByConstant - Handle transforms common to the three shifts, when
/// the shift amount is a constant.
// Shared combine for SHL/SRA/SRL with a constant shift amount: pull a
// single-use binop (and/or/xor, or add for shl) through the shift so the
// shift applies to the binop's operands, i.e. (shift (binop x, c)) ->
// (binop (shift x), (shift c)).  Returns the rewritten node or a null
// SDValue when the transform does not apply.
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
  SDNode *LHS = N->getOperand(0).getNode();
  // Pulling the binop through a shift with other users would duplicate work.
  if (!LHS->hasOneUse()) return SDValue();

  // We want to pull some binops through shifts, so that we have (and (shift))
  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
  // thing happens with address calculations, so it's important to canonicalize
  // it.
  bool HighBitSet = false;  // Can we transform this if the high bit is set?

  switch (LHS->getOpcode()) {
  default: return SDValue();
  case ISD::OR:
  case ISD::XOR:
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  case ISD::AND:
    HighBitSet = true;  // We can only transform sra if the high bit is set.
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  }

  // We require the RHS of the binop to be a constant as well.
  ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
  if (!BinOpCst) return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a constant.
  // If it is not a shift, it pessimizes some common cases like:
  //
  //    void foo(int *X, int i) { X[i & 1235] = 1; }
  //    int bar(int *X, int i) { return X[i & 255]; }
  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
  if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
       BinOpLHSVal->getOpcode() != ISD::SRA &&
       BinOpLHSVal->getOpcode() != ISD::SRL) ||
      !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
    return SDValue();

  EVT VT = N->getValueType(0);

  // If this is a signed shift right, and the high bit is modified by the
  // logical operation, do not perform the transformation. The highBitSet
  // boolean indicates the value of the high bit of the constant which would
  // cause it to be modified for this operation.
  if (N->getOpcode() == ISD::SRA) {
    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
    if (BinOpRHSSignSet != HighBitSet)
      return SDValue();
  }

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDValue NewRHS = DAG.getNode(N->getOpcode(), LHS->getOperand(1).getDebugLoc(),
                               N->getValueType(0),
                               LHS->getOperand(1), N->getOperand(1));

  // Create the new shift.
  SDValue NewShift = DAG.getNode(N->getOpcode(),
                                 LHS->getOperand(0).getDebugLoc(),
                                 VT, LHS->getOperand(0), N->getOperand(1));

  // Create the new binop.
  return DAG.getNode(LHS->getOpcode(), N->getDebugLoc(), VT, NewShift, NewRHS);
}

// Combine an ISD::SHL node: constant folding, degenerate-operand folds
// (zero/undef/over-shift), merging with inner shifts and extensions, and
// finally the generic shift-by-constant transform.  Returns the replacement
// value, SDValue(N, 0) if N was updated in place, or a null SDValue.
SDValue DAGCombiner::visitSHL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold (shl c1, c2) -> c1<<c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
  // fold (shl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (shl x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (shl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (shl undef, x) -> 0
  if (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // if (shl x, c) is known to be zero, return 0
  if (DAG.MaskedValueIsZero(SDValue(N, 0),
                            APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, VT);
  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
    SDValue N101 = N1.getOperand(0).getOperand(1);
    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
      EVT TruncVT = N1.getValueType();
      SDValue N100 = N1.getOperand(0).getOperand(0);
      // Truncate the mask constant down to the narrower amount type.
      APInt TruncC = N101C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getSizeInBits());
      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
                         DAG.getNode(ISD::AND, N->getDebugLoc(), TruncVT,
                                     DAG.getNode(ISD::TRUNCATE,
                                                 N->getDebugLoc(),
                                                 TruncVT, N100),
                                     DAG.getConstant(TruncC, TruncVT)));
    }
  }

  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SHL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    if (c1 + c2 >= OpSizeInBits)
      return DAG.getConstant(0, VT);
    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getConstant(c1 + c2, N1.getValueType()));
  }

  // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
  // For this to be valid, the second form must not preserve any of the bits
  // that are shifted out by the inner shift in the first form.  This means
  // the outer shift size must be >= the number of bits added by the ext.
  // As a corollary, we don't care what kind of ext it is.
  if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
              N0.getOpcode() == ISD::ANY_EXTEND ||
              N0.getOpcode() == ISD::SIGN_EXTEND) &&
      N0.getOperand(0).getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
    uint64_t c1 =
      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    EVT InnerShiftVT = N0.getOperand(0).getValueType();
    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
    // c2 >= (ext width added) guarantees the ext'd bits are shifted out.
    if (c2 >= OpSizeInBits - InnerShiftSize) {
      if (c1 + c2 >= OpSizeInBits)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::SHL, N0->getDebugLoc(), VT,
                         DAG.getNode(N0.getOpcode(), N0->getDebugLoc(), VT,
                                     N0.getOperand(0)->getOperand(0)),
                         DAG.getConstant(c1 + c2, N1.getValueType()));
    }
  }

  // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1), MASK) or
  //                               (and (srl x, (sub c1, c2), MASK)
  // Only fold this if the inner shift has no other uses -- if it does, folding
  // this will increase the total number of instructions.
  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    if (c1 < VT.getSizeInBits()) {
      uint64_t c2 = N1C->getZExtValue();
      // Bits surviving the srl; adjusted below for the net shift direction.
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - c1);
      SDValue Shift;
      if (c2 > c1) {
        Mask = Mask.shl(c2-c1);
        Shift = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
                            DAG.getConstant(c2-c1, N1.getValueType()));
      } else {
        Mask = Mask.lshr(c1-c2);
        Shift = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
                            DAG.getConstant(c1-c2, N1.getValueType()));
      }
      return DAG.getNode(ISD::AND, N0.getDebugLoc(), VT, Shift,
                         DAG.getConstant(Mask, VT));
    }
  }
  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
    SDValue HiBitsMask =
      DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
                                            VT.getSizeInBits() -
                                              N1C->getZExtValue()),
                      VT);
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
                       HiBitsMask);
  }

  if (N1C) {
    SDValue NewSHL = visitShiftByConstant(N, N1C->getZExtValue());
    if (NewSHL.getNode())
      return NewSHL;
  }

  return SDValue();
}

// Combine an ISD::SRA node; see the per-fold comments below.
SDValue DAGCombiner::visitSRA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold (sra c1, c2) -> (sra c1, c2)
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
  // fold (sra 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (sra -1, x) -> -1
  if (N0C && N0C->isAllOnesValue())
    return N0;
  // fold (sra x, (setge c, size(x))) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (sra x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
  // sext_inreg.
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               ExtVT, VT.getVectorNumElements());
    if ((!LegalOperations ||
         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
                         N0.getOperand(0), DAG.getValueType(ExtVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRA) {
    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
      unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
      // Clamp: sra saturates at width-1, it never becomes undefined here.
      if (Sum >= OpSizeInBits) Sum = OpSizeInBits-1;
      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0.getOperand(0),
                         DAG.getConstant(Sum, N1C->getValueType(0)));
    }
  }

  // fold (sra (shl X, m), (sub result_size, n))
  //      -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
  //      result_size - n != m.
  // If truncate is free for the target sext(shl) is likely to result in better
  // code.
  if (N0.getOpcode() == ISD::SHL) {
    // Get the two constanst of the shifts, CN0 = m, CN = n.
    const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (N01C && N1C) {
      // Determine what the truncate's result bitsize and type would be.
      EVT TruncVT =
        EVT::getIntegerVT(*DAG.getContext(),
                          OpSizeInBits - N1C->getZExtValue());
      // Determine the residual right-shift amount.
      signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();

      // If the shift is not a no-op (in which case this should be just a sign
      // extend already), the truncated to type is legal, sign_extend is legal
      // on that type, and the truncate to that type is both legal and free,
      // perform the transform.
      if ((ShiftAmt > 0) &&
          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
          TLI.isTruncateFree(VT, TruncVT)) {

        SDValue Amt = DAG.getConstant(ShiftAmt,
            getShiftAmountTy(N0.getOperand(0).getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT,
                                    N0.getOperand(0), Amt);
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), TruncVT,
                                    Shift);
        return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(),
                           N->getValueType(0), Trunc);
      }
    }
  }

  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
    SDValue N101 = N1.getOperand(0).getOperand(1);
    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
      EVT TruncVT = N1.getValueType();
      SDValue N100 = N1.getOperand(0).getOperand(0);
      APInt TruncC = N101C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getScalarType().getSizeInBits());
      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
                         DAG.getNode(ISD::AND, N->getDebugLoc(),
                                     TruncVT,
                                     DAG.getNode(ISD::TRUNCATE,
                                                 N->getDebugLoc(),
                                                 TruncVT, N100),
                                     DAG.getConstant(TruncC, TruncVT)));
    }
  }

  // fold (sra (trunc (sr x, c1)), c2) -> (trunc (sra x, c1+c2))
  //      if c1 is equal to the number of bits the trunc removes
  if (N0.getOpcode() == ISD::TRUNCATE &&
      (N0.getOperand(0).getOpcode() == ISD::SRL ||
       N0.getOperand(0).getOpcode() == ISD::SRA) &&
      N0.getOperand(0).hasOneUse() &&
      N0.getOperand(0).getOperand(1).hasOneUse() &&
      N1C && isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
    EVT LargeVT = N0.getOperand(0).getValueType();
    ConstantSDNode *LargeShiftAmt =
      cast<ConstantSDNode>(N0.getOperand(0).getOperand(1));

    if (LargeVT.getScalarType().getSizeInBits() - OpSizeInBits ==
        LargeShiftAmt->getZExtValue()) {
      SDValue Amt =
        DAG.getConstant(LargeShiftAmt->getZExtValue() + N1C->getZExtValue(),
              getShiftAmountTy(N0.getOperand(0).getOperand(0).getValueType()));
      SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), LargeVT,
                                N0.getOperand(0).getOperand(0), Amt);
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, SRA);
    }
  }

  // Simplify, based on bits shifted out of the LHS.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);


  // If the sign bit is known to be zero, switch this to a SRL.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, N1);

  if (N1C) {
    SDValue NewSRA = visitShiftByConstant(N, N1C->getZExtValue());
    if (NewSRA.getNode())
      return NewSRA;
  }

  return SDValue();
}

// Combine an ISD::SRL node; see the per-fold comments below.
SDValue DAGCombiner::visitSRL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold (srl c1, c2) -> c1 >>u c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
  // fold (srl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (srl x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (srl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // if (srl x, c) is known to be zero, return 0
  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
                                   APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, VT);

  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    if (c1 + c2 >= OpSizeInBits)
      return DAG.getConstant(0, VT);
    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getConstant(c1 + c2, N1.getValueType()));
  }

  // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
  if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(0).getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
    uint64_t c1 =
      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    EVT InnerShiftVT = N0.getOperand(0).getValueType();
    EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
    // This is only valid if the OpSizeInBits + c1 = size of inner shift.
    if (c1 + OpSizeInBits == InnerShiftSize) {
      if (c1 + c2 >= InnerShiftSize)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::TRUNCATE, N0->getDebugLoc(), VT,
                         DAG.getNode(ISD::SRL, N0->getDebugLoc(), InnerShiftVT,
                                     N0.getOperand(0)->getOperand(0),
                                     DAG.getConstant(c1 + c2, ShiftCountVT)));
    }
  }

  // fold (srl (shl x, c), c) -> (and x, cst2)
  if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
      N0.getValueSizeInBits() <= 64) {
    // ~0ULL >> ShAmt is the mask of the low (width - c) bits that survive
    // the shl/srl round trip.
    uint64_t ShAmt = N1C->getZExtValue()+64-N0.getValueSizeInBits();
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getConstant(~0ULL >> ShAmt, VT));
  }


  // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    // Shifting in all undef bits?
    EVT SmallVT = N0.getOperand(0).getValueType();
    if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
      return DAG.getUNDEF(VT);

    if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
      uint64_t ShiftAmt = N1C->getZExtValue();
      SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
                                       N0.getOperand(0),
                          DAG.getConstant(ShiftAmt, getShiftAmountTy(SmallVT)));
      AddToWorkList(SmallShift.getNode());
      return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
    }
  }

  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
  // bit, which is unmodified by sra.
  if (N1C && N1C->getZExtValue() + 1 == VT.getSizeInBits()) {
    if (N0.getOpcode() == ISD::SRA)
      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0), N1);
  }

  // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
    APInt KnownZero, KnownOne;
    DAG.ComputeMaskedBits(N0.getOperand(0), KnownZero, KnownOne);

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
    if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);

    // If all of the bits input the to ctlz node are known to be zero, then
    // the result of the ctlz is "32" and the result of the shift is one.
    APInt UnknownBits = ~KnownZero;
    if (UnknownBits == 0) return DAG.getConstant(1, VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
    if ((UnknownBits & (UnknownBits - 1)) == 0) {
      // Okay, we know that only that the single bit specified by UnknownBits
      // could be set on input to the CTLZ node. If this bit is set, the SRL
      // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
      // to an SRL/XOR pair, which is likely to simplify more.
      unsigned ShAmt = UnknownBits.countTrailingZeros();
      SDValue Op = N0.getOperand(0);

      if (ShAmt) {
        Op = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT, Op,
                  DAG.getConstant(ShAmt, getShiftAmountTy(Op.getValueType())));
        AddToWorkList(Op.getNode());
      }

      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
                         Op, DAG.getConstant(1, VT));
    }
  }

  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
    SDValue N101 = N1.getOperand(0).getOperand(1);
    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
      EVT TruncVT = N1.getValueType();
      SDValue N100 = N1.getOperand(0).getOperand(0);
      APInt TruncC = N101C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getSizeInBits());
      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
                         DAG.getNode(ISD::AND, N->getDebugLoc(),
                                     TruncVT,
                                     DAG.getNode(ISD::TRUNCATE,
                                                 N->getDebugLoc(),
                                                 TruncVT, N100),
                                     DAG.getConstant(TruncC, TruncVT)));
    }
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  if (N1C) {
    SDValue NewSRL = visitShiftByConstant(N, N1C->getZExtValue());
    if (NewSRL.getNode())
      return NewSRL;
  }

  // Attempt to convert a srl of a load into a narrower zero-extending load.
  SDValue NarrowLoad = ReduceLoadWidth(N);
  if (NarrowLoad.getNode())
    return NarrowLoad;

  // Here is a common situation. We want to optimize:
  //
  //   %a = ...
  //   %b = and i32 %a, 2
  //   %c = srl i32 %b, 1
  //   brcond i32 %c ...
  //
  // into
  //
  //   %a = ...
  //   %b = and %a, 2
  //   %c = setcc eq %b, 0
  //   brcond %c ...
  //
  // However when after the source operand of SRL is optimized into AND, the SRL
  // itself may not be optimized further. Look for it and add the BRCOND into
  // the worklist.
  if (N->hasOneUse()) {
    SDNode *Use = *N->use_begin();
    if (Use->getOpcode() == ISD::BRCOND)
      AddToWorkList(Use);
    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
      // Also look pass the truncate.
      Use = *Use->use_begin();
      if (Use->getOpcode() == ISD::BRCOND)
        AddToWorkList(Use);
    }
  }

  return SDValue();
}

// Combine an ISD::CTLZ node.
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz c1) -> c2
  // NOTE(review): this relies on DAG.getNode constant-folding unary ops with
  // a constant operand — confirm against SelectionDAG::getNode.
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ, N->getDebugLoc(), VT, N0);
  return SDValue();
}

// Combine an ISD::CTLZ_ZERO_UNDEF node.
SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz_zero_undef c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, N->getDebugLoc(), VT, N0);
  return SDValue();
}

// Combine an ISD::CTTZ node.
SDValue DAGCombiner::visitCTTZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ, N->getDebugLoc(), VT, N0);
  return SDValue();
}

// Combine an ISD::CTTZ_ZERO_UNDEF node.
SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz_zero_undef c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, N->getDebugLoc(), VT, N0);
  return SDValue();
}

// Combine an ISD::CTPOP node.
SDValue DAGCombiner::visitCTPOP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctpop c1) -> c2
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTPOP, N->getDebugLoc(), VT, N0);
  return SDValue();
}

// Combine an ISD::SELECT node (continues past this chunk of the file).
SDValue DAGCombiner::visitSELECT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
  EVT VT = N->getValueType(0);
  EVT VT0 = N0.getValueType();

  // fold (select C, X, X) -> X
  if (N1 == N2)
    return N1;
  // fold (select true, X, Y) -> X
  if (N0C && !N0C->isNullValue())
    return N1;
  // fold (select false, X, Y) -> Y
  if (N0C && N0C->isNullValue())
    return N2;
  // fold (select C, 1, X) -> (or C, X)
  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
  // fold (select C, 0, 1) -> (xor C, 1)
  // Only valid when the target represents booleans as 0/1 (or the condition
  // is already i1), so xor-with-1 is a logical not.
  if (VT.isInteger() &&
      (VT0 == MVT::i1 ||
       (VT0.isInteger() &&
        TLI.getBooleanContents(false) ==
        TargetLowering::ZeroOrOneBooleanContent)) &&
      N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
    SDValue XORNode;
    if (VT == VT0)
      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT0,
                         N0, DAG.getConstant(1, VT0));
    XORNode = DAG.getNode(ISD::XOR, N0.getDebugLoc(), VT0,
                          N0, DAG.getConstant(1, VT0));
    AddToWorkList(XORNode.getNode());
    // Widen or narrow the xor'd condition to the select's result type.
    if (VT.bitsGT(VT0))
4119193323Sed return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, XORNode); 4120193323Sed return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, XORNode); 4121193323Sed } 4122193323Sed // fold (select C, 0, X) -> (and (not C), X) 4123193323Sed if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) { 4124193323Sed SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT); 4125193323Sed AddToWorkList(NOTNode.getNode()); 4126193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, NOTNode, N2); 4127193323Sed } 4128193323Sed // fold (select C, X, 1) -> (or (not C), X) 4129193323Sed if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) { 4130193323Sed SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT); 4131193323Sed AddToWorkList(NOTNode.getNode()); 4132193323Sed return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, NOTNode, N1); 4133193323Sed } 4134193323Sed // fold (select C, X, 0) -> (and C, X) 4135193323Sed if (VT == MVT::i1 && N2C && N2C->isNullValue()) 4136193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1); 4137193323Sed // fold (select X, X, Y) -> (or X, Y) 4138193323Sed // fold (select X, 1, Y) -> (or X, Y) 4139193323Sed if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1))) 4140193323Sed return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2); 4141193323Sed // fold (select X, Y, X) -> (and X, Y) 4142193323Sed // fold (select X, Y, 0) -> (and X, Y) 4143193323Sed if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0))) 4144193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1); 4145193323Sed 4146193323Sed // If we can fold this based on the true/false value, do so. 4147193323Sed if (SimplifySelectOps(N, N1, N2)) 4148193323Sed return SDValue(N, 0); // Don't revisit N. 
4149193323Sed 4150193323Sed // fold selects based on a setcc into other things, such as min/max/abs 4151193323Sed if (N0.getOpcode() == ISD::SETCC) { 4152193323Sed // FIXME: 4153193323Sed // Check against MVT::Other for SELECT_CC, which is a workaround for targets 4154193323Sed // having to say they don't support SELECT_CC on every type the DAG knows 4155193323Sed // about, since there is no way to mark an opcode illegal at all value types 4156198090Srdivacky if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) && 4157198090Srdivacky TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) 4158193323Sed return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, 4159193323Sed N0.getOperand(0), N0.getOperand(1), 4160193323Sed N1, N2, N0.getOperand(2)); 4161193323Sed return SimplifySelect(N->getDebugLoc(), N0, N1, N2); 4162193323Sed } 4163193323Sed 4164193323Sed return SDValue(); 4165193323Sed} 4166193323Sed 4167251662SdimSDValue DAGCombiner::visitVSELECT(SDNode *N) { 4168251662Sdim SDValue N0 = N->getOperand(0); 4169251662Sdim SDValue N1 = N->getOperand(1); 4170251662Sdim SDValue N2 = N->getOperand(2); 4171251662Sdim DebugLoc DL = N->getDebugLoc(); 4172251662Sdim 4173251662Sdim // Canonicalize integer abs. 
4174251662Sdim // vselect (setg[te] X, 0), X, -X -> 4175251662Sdim // vselect (setgt X, -1), X, -X -> 4176251662Sdim // vselect (setl[te] X, 0), -X, X -> 4177251662Sdim // Y = sra (X, size(X)-1); xor (add (X, Y), Y) 4178251662Sdim if (N0.getOpcode() == ISD::SETCC) { 4179251662Sdim SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 4180251662Sdim ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 4181251662Sdim bool isAbs = false; 4182251662Sdim bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); 4183251662Sdim 4184251662Sdim if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) || 4185251662Sdim (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) && 4186251662Sdim N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1)) 4187251662Sdim isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode()); 4188251662Sdim else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) && 4189251662Sdim N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1)) 4190251662Sdim isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode()); 4191251662Sdim 4192251662Sdim if (isAbs) { 4193251662Sdim EVT VT = LHS.getValueType(); 4194251662Sdim SDValue Shift = DAG.getNode( 4195251662Sdim ISD::SRA, DL, VT, LHS, 4196251662Sdim DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, VT)); 4197251662Sdim SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift); 4198251662Sdim AddToWorkList(Shift.getNode()); 4199251662Sdim AddToWorkList(Add.getNode()); 4200251662Sdim return DAG.getNode(ISD::XOR, DL, VT, Add, Shift); 4201251662Sdim } 4202251662Sdim } 4203251662Sdim 4204251662Sdim return SDValue(); 4205251662Sdim} 4206251662Sdim 4207193323SedSDValue DAGCombiner::visitSELECT_CC(SDNode *N) { 4208193323Sed SDValue N0 = N->getOperand(0); 4209193323Sed SDValue N1 = N->getOperand(1); 4210193323Sed SDValue N2 = N->getOperand(2); 4211193323Sed SDValue N3 = N->getOperand(3); 4212193323Sed SDValue N4 = N->getOperand(4); 
4213193323Sed ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get(); 4214193323Sed 4215193323Sed // fold select_cc lhs, rhs, x, x, cc -> x 4216193323Sed if (N2 == N3) 4217193323Sed return N2; 4218193323Sed 4219193323Sed // Determine if the condition we're dealing with is constant 4220193323Sed SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()), 4221193323Sed N0, N1, CC, N->getDebugLoc(), false); 4222193323Sed if (SCC.getNode()) AddToWorkList(SCC.getNode()); 4223193323Sed 4224193323Sed if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) { 4225193323Sed if (!SCCC->isNullValue()) 4226193323Sed return N2; // cond always true -> true val 4227193323Sed else 4228193323Sed return N3; // cond always false -> false val 4229193323Sed } 4230193323Sed 4231193323Sed // Fold to a simpler select_cc 4232193323Sed if (SCC.getNode() && SCC.getOpcode() == ISD::SETCC) 4233193323Sed return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N2.getValueType(), 4234193323Sed SCC.getOperand(0), SCC.getOperand(1), N2, N3, 4235193323Sed SCC.getOperand(2)); 4236193323Sed 4237193323Sed // If we can fold this based on the true/false value, do so. 4238193323Sed if (SimplifySelectOps(N, N2, N3)) 4239193323Sed return SDValue(N, 0); // Don't revisit N. 4240193323Sed 4241193323Sed // fold select_cc into other things, such as min/max/abs 4242193323Sed return SimplifySelectCC(N->getDebugLoc(), N0, N1, N2, N3, CC); 4243193323Sed} 4244193323Sed 4245193323SedSDValue DAGCombiner::visitSETCC(SDNode *N) { 4246193323Sed return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1), 4247193323Sed cast<CondCodeSDNode>(N->getOperand(2))->get(), 4248193323Sed N->getDebugLoc()); 4249193323Sed} 4250193323Sed 4251193323Sed// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this: 4252193323Sed// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))" 4253193323Sed// transformation. 
Returns true if extension are possible and the above 4254193323Sed// mentioned transformation is profitable. 4255193323Sedstatic bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0, 4256193323Sed unsigned ExtOpc, 4257193323Sed SmallVector<SDNode*, 4> &ExtendNodes, 4258193323Sed const TargetLowering &TLI) { 4259193323Sed bool HasCopyToRegUses = false; 4260193323Sed bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType()); 4261193323Sed for (SDNode::use_iterator UI = N0.getNode()->use_begin(), 4262193323Sed UE = N0.getNode()->use_end(); 4263193323Sed UI != UE; ++UI) { 4264193323Sed SDNode *User = *UI; 4265193323Sed if (User == N) 4266193323Sed continue; 4267193323Sed if (UI.getUse().getResNo() != N0.getResNo()) 4268193323Sed continue; 4269193323Sed // FIXME: Only extend SETCC N, N and SETCC N, c for now. 4270193323Sed if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) { 4271193323Sed ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get(); 4272193323Sed if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC)) 4273193323Sed // Sign bits will be lost after a zext. 4274193323Sed return false; 4275193323Sed bool Add = false; 4276193323Sed for (unsigned i = 0; i != 2; ++i) { 4277193323Sed SDValue UseOp = User->getOperand(i); 4278193323Sed if (UseOp == N0) 4279193323Sed continue; 4280193323Sed if (!isa<ConstantSDNode>(UseOp)) 4281193323Sed return false; 4282193323Sed Add = true; 4283193323Sed } 4284193323Sed if (Add) 4285193323Sed ExtendNodes.push_back(User); 4286193323Sed continue; 4287193323Sed } 4288193323Sed // If truncates aren't free and there are users we can't 4289193323Sed // extend, it isn't worthwhile. 4290193323Sed if (!isTruncFree) 4291193323Sed return false; 4292193323Sed // Remember if this value is live-out. 
4293193323Sed if (User->getOpcode() == ISD::CopyToReg) 4294193323Sed HasCopyToRegUses = true; 4295193323Sed } 4296193323Sed 4297193323Sed if (HasCopyToRegUses) { 4298193323Sed bool BothLiveOut = false; 4299193323Sed for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 4300193323Sed UI != UE; ++UI) { 4301193323Sed SDUse &Use = UI.getUse(); 4302193323Sed if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) { 4303193323Sed BothLiveOut = true; 4304193323Sed break; 4305193323Sed } 4306193323Sed } 4307193323Sed if (BothLiveOut) 4308193323Sed // Both unextended and extended values are live out. There had better be 4309218893Sdim // a good reason for the transformation. 4310193323Sed return ExtendNodes.size(); 4311193323Sed } 4312193323Sed return true; 4313193323Sed} 4314193323Sed 4315224145Sdimvoid DAGCombiner::ExtendSetCCUses(SmallVector<SDNode*, 4> SetCCs, 4316224145Sdim SDValue Trunc, SDValue ExtLoad, DebugLoc DL, 4317224145Sdim ISD::NodeType ExtType) { 4318224145Sdim // Extend SetCC uses if necessary. 
4319224145Sdim for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) { 4320224145Sdim SDNode *SetCC = SetCCs[i]; 4321224145Sdim SmallVector<SDValue, 4> Ops; 4322224145Sdim 4323224145Sdim for (unsigned j = 0; j != 2; ++j) { 4324224145Sdim SDValue SOp = SetCC->getOperand(j); 4325224145Sdim if (SOp == Trunc) 4326224145Sdim Ops.push_back(ExtLoad); 4327224145Sdim else 4328224145Sdim Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp)); 4329224145Sdim } 4330224145Sdim 4331224145Sdim Ops.push_back(SetCC->getOperand(2)); 4332224145Sdim CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), 4333224145Sdim &Ops[0], Ops.size())); 4334224145Sdim } 4335224145Sdim} 4336224145Sdim 4337193323SedSDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { 4338193323Sed SDValue N0 = N->getOperand(0); 4339198090Srdivacky EVT VT = N->getValueType(0); 4340193323Sed 4341193323Sed // fold (sext c1) -> c1 4342193323Sed if (isa<ConstantSDNode>(N0)) 4343193323Sed return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N0); 4344193323Sed 4345193323Sed // fold (sext (sext x)) -> (sext x) 4346193323Sed // fold (sext (aext x)) -> (sext x) 4347193323Sed if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 4348193323Sed return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, 4349193323Sed N0.getOperand(0)); 4350193323Sed 4351193323Sed if (N0.getOpcode() == ISD::TRUNCATE) { 4352193323Sed // fold (sext (truncate (load x))) -> (sext (smaller load x)) 4353193323Sed // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n))) 4354193323Sed SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 4355193323Sed if (NarrowLoad.getNode()) { 4356208599Srdivacky SDNode* oye = N0.getNode()->getOperand(0).getNode(); 4357208599Srdivacky if (NarrowLoad.getNode() != N0.getNode()) { 4358193323Sed CombineTo(N0.getNode(), NarrowLoad); 4359208599Srdivacky // CombineTo deleted the truncate, if needed, but not what's under it. 
4360208599Srdivacky AddToWorkList(oye); 4361208599Srdivacky } 4362193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 4363193323Sed } 4364193323Sed 4365193323Sed // See if the value being truncated is already sign extended. If so, just 4366193323Sed // eliminate the trunc/sext pair. 4367193323Sed SDValue Op = N0.getOperand(0); 4368202375Srdivacky unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits(); 4369202375Srdivacky unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits(); 4370202375Srdivacky unsigned DestBits = VT.getScalarType().getSizeInBits(); 4371193323Sed unsigned NumSignBits = DAG.ComputeNumSignBits(Op); 4372193323Sed 4373193323Sed if (OpBits == DestBits) { 4374193323Sed // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign 4375193323Sed // bits, it is already ready. 4376193323Sed if (NumSignBits > DestBits-MidBits) 4377193323Sed return Op; 4378193323Sed } else if (OpBits < DestBits) { 4379193323Sed // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign 4380193323Sed // bits, just sext from i32. 4381193323Sed if (NumSignBits > OpBits-MidBits) 4382193323Sed return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, Op); 4383193323Sed } else { 4384193323Sed // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign 4385193323Sed // bits, just truncate to i32. 4386193323Sed if (NumSignBits > OpBits-MidBits) 4387193323Sed return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op); 4388193323Sed } 4389193323Sed 4390193323Sed // fold (sext (truncate x)) -> (sextinreg x). 
4391193323Sed if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, 4392193323Sed N0.getValueType())) { 4393202375Srdivacky if (OpBits < DestBits) 4394193323Sed Op = DAG.getNode(ISD::ANY_EXTEND, N0.getDebugLoc(), VT, Op); 4395202375Srdivacky else if (OpBits > DestBits) 4396193323Sed Op = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), VT, Op); 4397193323Sed return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, Op, 4398202375Srdivacky DAG.getValueType(N0.getValueType())); 4399193323Sed } 4400193323Sed } 4401193323Sed 4402193323Sed // fold (sext (load x)) -> (sext (truncate (sextload x))) 4403219077Sdim // None of the supported targets knows how to perform load and sign extend 4404221345Sdim // on vectors in one instruction. We only perform this transformation on 4405221345Sdim // scalars. 4406219077Sdim if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 4407193323Sed ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 4408193323Sed TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) { 4409193323Sed bool DoXform = true; 4410193323Sed SmallVector<SDNode*, 4> SetCCs; 4411193323Sed if (!N0.hasOneUse()) 4412193323Sed DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI); 4413193323Sed if (DoXform) { 4414193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4415218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT, 4416193323Sed LN0->getChain(), 4417218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 4418193323Sed N0.getValueType(), 4419203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 4420203954Srdivacky LN0->getAlignment()); 4421193323Sed CombineTo(N, ExtLoad); 4422193323Sed SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4423193323Sed N0.getValueType(), ExtLoad); 4424193323Sed CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 4425224145Sdim ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4426224145Sdim ISD::SIGN_EXTEND); 4427193323Sed return SDValue(N, 
0); // Return N so it doesn't get rechecked! 4428193323Sed } 4429193323Sed } 4430193323Sed 4431193323Sed // fold (sext (sextload x)) -> (sext (truncate (sextload x))) 4432193323Sed // fold (sext ( extload x)) -> (sext (truncate (sextload x))) 4433193323Sed if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 4434193323Sed ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 4435193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4436198090Srdivacky EVT MemVT = LN0->getMemoryVT(); 4437193323Sed if ((!LegalOperations && !LN0->isVolatile()) || 4438198090Srdivacky TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) { 4439218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT, 4440193323Sed LN0->getChain(), 4441218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 4442218893Sdim MemVT, 4443203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 4444203954Srdivacky LN0->getAlignment()); 4445193323Sed CombineTo(N, ExtLoad); 4446193323Sed CombineTo(N0.getNode(), 4447193323Sed DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4448193323Sed N0.getValueType(), ExtLoad), 4449193323Sed ExtLoad.getValue(1)); 4450193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 
4451193323Sed } 4452193323Sed } 4453193323Sed 4454224145Sdim // fold (sext (and/or/xor (load x), cst)) -> 4455224145Sdim // (and/or/xor (sextload x), (sext cst)) 4456224145Sdim if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 4457224145Sdim N0.getOpcode() == ISD::XOR) && 4458224145Sdim isa<LoadSDNode>(N0.getOperand(0)) && 4459224145Sdim N0.getOperand(1).getOpcode() == ISD::Constant && 4460224145Sdim TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()) && 4461224145Sdim (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 4462224145Sdim LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 4463224145Sdim if (LN0->getExtensionType() != ISD::ZEXTLOAD) { 4464224145Sdim bool DoXform = true; 4465224145Sdim SmallVector<SDNode*, 4> SetCCs; 4466224145Sdim if (!N0.hasOneUse()) 4467224145Sdim DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND, 4468224145Sdim SetCCs, TLI); 4469224145Sdim if (DoXform) { 4470224145Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, LN0->getDebugLoc(), VT, 4471224145Sdim LN0->getChain(), LN0->getBasePtr(), 4472224145Sdim LN0->getPointerInfo(), 4473224145Sdim LN0->getMemoryVT(), 4474224145Sdim LN0->isVolatile(), 4475224145Sdim LN0->isNonTemporal(), 4476224145Sdim LN0->getAlignment()); 4477224145Sdim APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 4478224145Sdim Mask = Mask.sext(VT.getSizeInBits()); 4479224145Sdim SDValue And = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, 4480224145Sdim ExtLoad, DAG.getConstant(Mask, VT)); 4481224145Sdim SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 4482224145Sdim N0.getOperand(0).getDebugLoc(), 4483224145Sdim N0.getOperand(0).getValueType(), ExtLoad); 4484224145Sdim CombineTo(N, And); 4485224145Sdim CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 4486224145Sdim ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4487224145Sdim ISD::SIGN_EXTEND); 4488224145Sdim return SDValue(N, 0); // Return N so it doesn't get 
rechecked! 4489224145Sdim } 4490224145Sdim } 4491224145Sdim } 4492224145Sdim 4493193323Sed if (N0.getOpcode() == ISD::SETCC) { 4494198090Srdivacky // sext(setcc) -> sext_in_reg(vsetcc) for vectors. 4495207618Srdivacky // Only do this before legalize for now. 4496251662Sdim if (VT.isVector() && !LegalOperations && 4497251662Sdim TLI.getBooleanContents(true) == 4498251662Sdim TargetLowering::ZeroOrNegativeOneBooleanContent) { 4499207618Srdivacky EVT N0VT = N0.getOperand(0).getValueType(); 4500234353Sdim // On some architectures (such as SSE/NEON/etc) the SETCC result type is 4501234353Sdim // of the same size as the compared operands. Only optimize sext(setcc()) 4502234353Sdim // if this is the case. 4503234353Sdim EVT SVT = TLI.getSetCCResultType(N0VT); 4504234353Sdim 4505234353Sdim // We know that the # elements of the results is the same as the 4506234353Sdim // # elements of the compare (and the # elements of the compare result 4507234353Sdim // for that matter). Check to see that they are the same size. If so, 4508234353Sdim // we know that the element size of the sext'd result matches the 4509234353Sdim // element size of the compare operands. 
4510234353Sdim if (VT.getSizeInBits() == SVT.getSizeInBits()) 4511226633Sdim return DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0), 4512210299Sed N0.getOperand(1), 4513210299Sed cast<CondCodeSDNode>(N0.getOperand(2))->get()); 4514207618Srdivacky // If the desired elements are smaller or larger than the source 4515207618Srdivacky // elements we can use a matching integer vector type and then 4516207618Srdivacky // truncate/sign extend 4517243830Sdim EVT MatchingElementType = 4518243830Sdim EVT::getIntegerVT(*DAG.getContext(), 4519243830Sdim N0VT.getScalarType().getSizeInBits()); 4520243830Sdim EVT MatchingVectorType = 4521243830Sdim EVT::getVectorVT(*DAG.getContext(), MatchingElementType, 4522243830Sdim N0VT.getVectorNumElements()); 4523234353Sdim 4524243830Sdim if (SVT == MatchingVectorType) { 4525243830Sdim SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, 4526243830Sdim N0.getOperand(0), N0.getOperand(1), 4527243830Sdim cast<CondCodeSDNode>(N0.getOperand(2))->get()); 4528243830Sdim return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT); 4529207618Srdivacky } 4530198090Srdivacky } 4531207618Srdivacky 4532198090Srdivacky // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc) 4533207618Srdivacky unsigned ElementWidth = VT.getScalarType().getSizeInBits(); 4534198090Srdivacky SDValue NegOne = 4535207618Srdivacky DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT); 4536193323Sed SDValue SCC = 4537193323Sed SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1), 4538198090Srdivacky NegOne, DAG.getConstant(0, VT), 4539193323Sed cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 4540193323Sed if (SCC.getNode()) return SCC; 4541249423Sdim if (!VT.isVector() && (!LegalOperations || 4542249423Sdim TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(VT)))) 4543203954Srdivacky return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4544203954Srdivacky DAG.getSetCC(N->getDebugLoc(), 4545203954Srdivacky 
TLI.getSetCCResultType(VT), 4546203954Srdivacky N0.getOperand(0), N0.getOperand(1), 4547203954Srdivacky cast<CondCodeSDNode>(N0.getOperand(2))->get()), 4548203954Srdivacky NegOne, DAG.getConstant(0, VT)); 4549218893Sdim } 4550193323Sed 4551193323Sed // fold (sext x) -> (zext x) if the sign bit is known zero. 4552193323Sed if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && 4553193323Sed DAG.SignBitIsZero(N0)) 4554193323Sed return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0); 4555193323Sed 4556193323Sed return SDValue(); 4557193323Sed} 4558193323Sed 4559234353Sdim// isTruncateOf - If N is a truncate of some other value, return true, record 4560234353Sdim// the value being truncated in Op and which of Op's bits are zero in KnownZero. 4561234353Sdim// This function computes KnownZero to avoid a duplicated call to 4562234353Sdim// ComputeMaskedBits in the caller. 4563234353Sdimstatic bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, 4564234353Sdim APInt &KnownZero) { 4565234353Sdim APInt KnownOne; 4566234353Sdim if (N->getOpcode() == ISD::TRUNCATE) { 4567234353Sdim Op = N->getOperand(0); 4568234353Sdim DAG.ComputeMaskedBits(Op, KnownZero, KnownOne); 4569234353Sdim return true; 4570234353Sdim } 4571234353Sdim 4572234353Sdim if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 || 4573234353Sdim cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE) 4574234353Sdim return false; 4575234353Sdim 4576234353Sdim SDValue Op0 = N->getOperand(0); 4577234353Sdim SDValue Op1 = N->getOperand(1); 4578234353Sdim assert(Op0.getValueType() == Op1.getValueType()); 4579234353Sdim 4580234353Sdim ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0); 4581234353Sdim ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1); 4582234353Sdim if (COp0 && COp0->isNullValue()) 4583234353Sdim Op = Op1; 4584234353Sdim else if (COp1 && COp1->isNullValue()) 4585234353Sdim Op = Op0; 4586234353Sdim else 4587234353Sdim return false; 4588234353Sdim 
4589234353Sdim DAG.ComputeMaskedBits(Op, KnownZero, KnownOne); 4590234353Sdim 4591234353Sdim if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue()) 4592234353Sdim return false; 4593234353Sdim 4594234353Sdim return true; 4595234353Sdim} 4596234353Sdim 4597193323SedSDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { 4598193323Sed SDValue N0 = N->getOperand(0); 4599198090Srdivacky EVT VT = N->getValueType(0); 4600193323Sed 4601193323Sed // fold (zext c1) -> c1 4602193323Sed if (isa<ConstantSDNode>(N0)) 4603193323Sed return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0); 4604193323Sed // fold (zext (zext x)) -> (zext x) 4605193323Sed // fold (zext (aext x)) -> (zext x) 4606193323Sed if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 4607193323Sed return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, 4608193323Sed N0.getOperand(0)); 4609193323Sed 4610234353Sdim // fold (zext (truncate x)) -> (zext x) or 4611234353Sdim // (zext (truncate x)) -> (truncate x) 4612234353Sdim // This is valid when the truncated bits of x are already zero. 4613234353Sdim // FIXME: We should extend this to work for vectors too. 4614234353Sdim SDValue Op; 4615234353Sdim APInt KnownZero; 4616234353Sdim if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) { 4617234353Sdim APInt TruncatedBits = 4618234353Sdim (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ? 
4619234353Sdim APInt(Op.getValueSizeInBits(), 0) : 4620234353Sdim APInt::getBitsSet(Op.getValueSizeInBits(), 4621234353Sdim N0.getValueSizeInBits(), 4622234353Sdim std::min(Op.getValueSizeInBits(), 4623234353Sdim VT.getSizeInBits())); 4624234353Sdim if (TruncatedBits == (KnownZero & TruncatedBits)) { 4625234353Sdim if (VT.bitsGT(Op.getValueType())) 4626234353Sdim return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, Op); 4627234353Sdim if (VT.bitsLT(Op.getValueType())) 4628234353Sdim return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op); 4629234353Sdim 4630234353Sdim return Op; 4631234353Sdim } 4632234353Sdim } 4633234353Sdim 4634193323Sed // fold (zext (truncate (load x))) -> (zext (smaller load x)) 4635193323Sed // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n))) 4636193323Sed if (N0.getOpcode() == ISD::TRUNCATE) { 4637193323Sed SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 4638193323Sed if (NarrowLoad.getNode()) { 4639208599Srdivacky SDNode* oye = N0.getNode()->getOperand(0).getNode(); 4640208599Srdivacky if (NarrowLoad.getNode() != N0.getNode()) { 4641193323Sed CombineTo(N0.getNode(), NarrowLoad); 4642208599Srdivacky // CombineTo deleted the truncate, if needed, but not what's under it. 4643208599Srdivacky AddToWorkList(oye); 4644208599Srdivacky } 4645221345Sdim return SDValue(N, 0); // Return N so it doesn't get rechecked! 
4646193323Sed } 4647193323Sed } 4648193323Sed 4649193323Sed // fold (zext (truncate x)) -> (and x, mask) 4650193323Sed if (N0.getOpcode() == ISD::TRUNCATE && 4651210299Sed (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) { 4652218893Sdim 4653218893Sdim // fold (zext (truncate (load x))) -> (zext (smaller load x)) 4654218893Sdim // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n))) 4655218893Sdim SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 4656218893Sdim if (NarrowLoad.getNode()) { 4657218893Sdim SDNode* oye = N0.getNode()->getOperand(0).getNode(); 4658218893Sdim if (NarrowLoad.getNode() != N0.getNode()) { 4659218893Sdim CombineTo(N0.getNode(), NarrowLoad); 4660218893Sdim // CombineTo deleted the truncate, if needed, but not what's under it. 4661218893Sdim AddToWorkList(oye); 4662218893Sdim } 4663218893Sdim return SDValue(N, 0); // Return N so it doesn't get rechecked! 4664218893Sdim } 4665218893Sdim 4666193323Sed SDValue Op = N0.getOperand(0); 4667193323Sed if (Op.getValueType().bitsLT(VT)) { 4668193323Sed Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op); 4669239462Sdim AddToWorkList(Op.getNode()); 4670193323Sed } else if (Op.getValueType().bitsGT(VT)) { 4671193323Sed Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op); 4672239462Sdim AddToWorkList(Op.getNode()); 4673193323Sed } 4674200581Srdivacky return DAG.getZeroExtendInReg(Op, N->getDebugLoc(), 4675200581Srdivacky N0.getValueType().getScalarType()); 4676193323Sed } 4677193323Sed 4678193323Sed // Fold (zext (and (trunc x), cst)) -> (and x, cst), 4679193323Sed // if either of the casts is not free. 
4680193323Sed if (N0.getOpcode() == ISD::AND && 4681193323Sed N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 4682193323Sed N0.getOperand(1).getOpcode() == ISD::Constant && 4683193323Sed (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 4684193323Sed N0.getValueType()) || 4685193323Sed !TLI.isZExtFree(N0.getValueType(), VT))) { 4686193323Sed SDValue X = N0.getOperand(0).getOperand(0); 4687193323Sed if (X.getValueType().bitsLT(VT)) { 4688193323Sed X = DAG.getNode(ISD::ANY_EXTEND, X.getDebugLoc(), VT, X); 4689193323Sed } else if (X.getValueType().bitsGT(VT)) { 4690193323Sed X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X); 4691193323Sed } 4692193323Sed APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 4693218893Sdim Mask = Mask.zext(VT.getSizeInBits()); 4694193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 4695193323Sed X, DAG.getConstant(Mask, VT)); 4696193323Sed } 4697193323Sed 4698193323Sed // fold (zext (load x)) -> (zext (truncate (zextload x))) 4699219077Sdim // None of the supported targets knows how to perform load and vector_zext 4700221345Sdim // on vectors in one instruction. We only perform this transformation on 4701221345Sdim // scalars. 
4702219077Sdim if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 4703193323Sed ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 4704193323Sed TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) { 4705193323Sed bool DoXform = true; 4706193323Sed SmallVector<SDNode*, 4> SetCCs; 4707193323Sed if (!N0.hasOneUse()) 4708193323Sed DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI); 4709193323Sed if (DoXform) { 4710193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4711218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT, 4712193323Sed LN0->getChain(), 4713218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 4714193323Sed N0.getValueType(), 4715203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 4716203954Srdivacky LN0->getAlignment()); 4717193323Sed CombineTo(N, ExtLoad); 4718193323Sed SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4719193323Sed N0.getValueType(), ExtLoad); 4720193323Sed CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 4721193323Sed 4722224145Sdim ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4723224145Sdim ISD::ZERO_EXTEND); 4724224145Sdim return SDValue(N, 0); // Return N so it doesn't get rechecked! 
4725224145Sdim } 4726224145Sdim } 4727193323Sed 4728224145Sdim // fold (zext (and/or/xor (load x), cst)) -> 4729224145Sdim // (and/or/xor (zextload x), (zext cst)) 4730224145Sdim if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 4731224145Sdim N0.getOpcode() == ISD::XOR) && 4732224145Sdim isa<LoadSDNode>(N0.getOperand(0)) && 4733224145Sdim N0.getOperand(1).getOpcode() == ISD::Constant && 4734224145Sdim TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()) && 4735224145Sdim (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 4736224145Sdim LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 4737224145Sdim if (LN0->getExtensionType() != ISD::SEXTLOAD) { 4738224145Sdim bool DoXform = true; 4739224145Sdim SmallVector<SDNode*, 4> SetCCs; 4740224145Sdim if (!N0.hasOneUse()) 4741224145Sdim DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::ZERO_EXTEND, 4742224145Sdim SetCCs, TLI); 4743224145Sdim if (DoXform) { 4744224145Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), VT, 4745224145Sdim LN0->getChain(), LN0->getBasePtr(), 4746224145Sdim LN0->getPointerInfo(), 4747224145Sdim LN0->getMemoryVT(), 4748224145Sdim LN0->isVolatile(), 4749224145Sdim LN0->isNonTemporal(), 4750224145Sdim LN0->getAlignment()); 4751224145Sdim APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 4752224145Sdim Mask = Mask.zext(VT.getSizeInBits()); 4753224145Sdim SDValue And = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, 4754224145Sdim ExtLoad, DAG.getConstant(Mask, VT)); 4755224145Sdim SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 4756224145Sdim N0.getOperand(0).getDebugLoc(), 4757224145Sdim N0.getOperand(0).getValueType(), ExtLoad); 4758224145Sdim CombineTo(N, And); 4759224145Sdim CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 4760224145Sdim ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4761224145Sdim ISD::ZERO_EXTEND); 4762224145Sdim return SDValue(N, 0); // Return N so it doesn't get 
rechecked! 4763193323Sed } 4764193323Sed } 4765193323Sed } 4766193323Sed 4767193323Sed // fold (zext (zextload x)) -> (zext (truncate (zextload x))) 4768193323Sed // fold (zext ( extload x)) -> (zext (truncate (zextload x))) 4769193323Sed if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 4770193323Sed ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 4771193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4772198090Srdivacky EVT MemVT = LN0->getMemoryVT(); 4773193323Sed if ((!LegalOperations && !LN0->isVolatile()) || 4774198090Srdivacky TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) { 4775218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT, 4776193323Sed LN0->getChain(), 4777218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 4778218893Sdim MemVT, 4779203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 4780203954Srdivacky LN0->getAlignment()); 4781193323Sed CombineTo(N, ExtLoad); 4782193323Sed CombineTo(N0.getNode(), 4783193323Sed DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), N0.getValueType(), 4784193323Sed ExtLoad), 4785193323Sed ExtLoad.getValue(1)); 4786193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 4787193323Sed } 4788193323Sed } 4789193323Sed 4790193323Sed if (N0.getOpcode() == ISD::SETCC) { 4791208599Srdivacky if (!LegalOperations && VT.isVector()) { 4792208599Srdivacky // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors. 4793208599Srdivacky // Only do this before legalize for now. 
4794208599Srdivacky EVT N0VT = N0.getOperand(0).getValueType(); 4795208599Srdivacky EVT EltVT = VT.getVectorElementType(); 4796208599Srdivacky SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(), 4797208599Srdivacky DAG.getConstant(1, EltVT)); 4798223017Sdim if (VT.getSizeInBits() == N0VT.getSizeInBits()) 4799208599Srdivacky // We know that the # elements of the results is the same as the 4800208599Srdivacky // # elements of the compare (and the # elements of the compare result 4801208599Srdivacky // for that matter). Check to see that they are the same size. If so, 4802208599Srdivacky // we know that the element size of the sext'd result matches the 4803208599Srdivacky // element size of the compare operands. 4804208599Srdivacky return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 4805226633Sdim DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0), 4806208599Srdivacky N0.getOperand(1), 4807208599Srdivacky cast<CondCodeSDNode>(N0.getOperand(2))->get()), 4808208599Srdivacky DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT, 4809208599Srdivacky &OneOps[0], OneOps.size())); 4810223017Sdim 4811223017Sdim // If the desired elements are smaller or larger than the source 4812223017Sdim // elements we can use a matching integer vector type and then 4813223017Sdim // truncate/sign extend 4814223017Sdim EVT MatchingElementType = 4815223017Sdim EVT::getIntegerVT(*DAG.getContext(), 4816223017Sdim N0VT.getScalarType().getSizeInBits()); 4817223017Sdim EVT MatchingVectorType = 4818223017Sdim EVT::getVectorVT(*DAG.getContext(), MatchingElementType, 4819223017Sdim N0VT.getVectorNumElements()); 4820223017Sdim SDValue VsetCC = 4821226633Sdim DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0), 4822223017Sdim N0.getOperand(1), 4823223017Sdim cast<CondCodeSDNode>(N0.getOperand(2))->get()); 4824223017Sdim return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 4825223017Sdim DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT), 4826223017Sdim DAG.getNode(ISD::BUILD_VECTOR, 
N->getDebugLoc(), VT, 4827223017Sdim &OneOps[0], OneOps.size())); 4828208599Srdivacky } 4829208599Srdivacky 4830208599Srdivacky // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 4831193323Sed SDValue SCC = 4832193323Sed SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1), 4833193323Sed DAG.getConstant(1, VT), DAG.getConstant(0, VT), 4834193323Sed cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 4835193323Sed if (SCC.getNode()) return SCC; 4836193323Sed } 4837193323Sed 4838200581Srdivacky // (zext (shl (zext x), cst)) -> (shl (zext x), cst) 4839200581Srdivacky if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) && 4840200581Srdivacky isa<ConstantSDNode>(N0.getOperand(1)) && 4841200581Srdivacky N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND && 4842200581Srdivacky N0.hasOneUse()) { 4843218893Sdim SDValue ShAmt = N0.getOperand(1); 4844218893Sdim unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 4845200581Srdivacky if (N0.getOpcode() == ISD::SHL) { 4846218893Sdim SDValue InnerZExt = N0.getOperand(0); 4847200581Srdivacky // If the original shl may be shifting out bits, do not perform this 4848200581Srdivacky // transformation. 4849218893Sdim unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() - 4850218893Sdim InnerZExt.getOperand(0).getValueType().getSizeInBits(); 4851218893Sdim if (ShAmtVal > KnownZeroBits) 4852200581Srdivacky return SDValue(); 4853200581Srdivacky } 4854218893Sdim 4855218893Sdim DebugLoc DL = N->getDebugLoc(); 4856219077Sdim 4857219077Sdim // Ensure that the shift amount is wide enough for the shifted value. 
4858218893Sdim if (VT.getSizeInBits() >= 256) 4859218893Sdim ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt); 4860219077Sdim 4861218893Sdim return DAG.getNode(N0.getOpcode(), DL, VT, 4862218893Sdim DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)), 4863218893Sdim ShAmt); 4864200581Srdivacky } 4865200581Srdivacky 4866193323Sed return SDValue(); 4867193323Sed} 4868193323Sed 4869193323SedSDValue DAGCombiner::visitANY_EXTEND(SDNode *N) { 4870193323Sed SDValue N0 = N->getOperand(0); 4871198090Srdivacky EVT VT = N->getValueType(0); 4872193323Sed 4873193323Sed // fold (aext c1) -> c1 4874193323Sed if (isa<ConstantSDNode>(N0)) 4875193323Sed return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, N0); 4876193323Sed // fold (aext (aext x)) -> (aext x) 4877193323Sed // fold (aext (zext x)) -> (zext x) 4878193323Sed // fold (aext (sext x)) -> (sext x) 4879193323Sed if (N0.getOpcode() == ISD::ANY_EXTEND || 4880193323Sed N0.getOpcode() == ISD::ZERO_EXTEND || 4881193323Sed N0.getOpcode() == ISD::SIGN_EXTEND) 4882193323Sed return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, N0.getOperand(0)); 4883193323Sed 4884193323Sed // fold (aext (truncate (load x))) -> (aext (smaller load x)) 4885193323Sed // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n))) 4886193323Sed if (N0.getOpcode() == ISD::TRUNCATE) { 4887193323Sed SDValue NarrowLoad = ReduceLoadWidth(N0.getNode()); 4888193323Sed if (NarrowLoad.getNode()) { 4889208599Srdivacky SDNode* oye = N0.getNode()->getOperand(0).getNode(); 4890208599Srdivacky if (NarrowLoad.getNode() != N0.getNode()) { 4891193323Sed CombineTo(N0.getNode(), NarrowLoad); 4892208599Srdivacky // CombineTo deleted the truncate, if needed, but not what's under it. 4893208599Srdivacky AddToWorkList(oye); 4894208599Srdivacky } 4895221345Sdim return SDValue(N, 0); // Return N so it doesn't get rechecked! 
4896193323Sed } 4897193323Sed } 4898193323Sed 4899193323Sed // fold (aext (truncate x)) 4900193323Sed if (N0.getOpcode() == ISD::TRUNCATE) { 4901193323Sed SDValue TruncOp = N0.getOperand(0); 4902193323Sed if (TruncOp.getValueType() == VT) 4903193323Sed return TruncOp; // x iff x size == zext size. 4904193323Sed if (TruncOp.getValueType().bitsGT(VT)) 4905193323Sed return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, TruncOp); 4906193323Sed return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, TruncOp); 4907193323Sed } 4908193323Sed 4909193323Sed // Fold (aext (and (trunc x), cst)) -> (and x, cst) 4910193323Sed // if the trunc is not free. 4911193323Sed if (N0.getOpcode() == ISD::AND && 4912193323Sed N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 4913193323Sed N0.getOperand(1).getOpcode() == ISD::Constant && 4914193323Sed !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 4915193323Sed N0.getValueType())) { 4916193323Sed SDValue X = N0.getOperand(0).getOperand(0); 4917193323Sed if (X.getValueType().bitsLT(VT)) { 4918193323Sed X = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, X); 4919193323Sed } else if (X.getValueType().bitsGT(VT)) { 4920193323Sed X = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, X); 4921193323Sed } 4922193323Sed APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 4923218893Sdim Mask = Mask.zext(VT.getSizeInBits()); 4924193323Sed return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, 4925193323Sed X, DAG.getConstant(Mask, VT)); 4926193323Sed } 4927193323Sed 4928193323Sed // fold (aext (load x)) -> (aext (truncate (extload x))) 4929219077Sdim // None of the supported targets knows how to perform load and any_ext 4930221345Sdim // on vectors in one instruction. We only perform this transformation on 4931221345Sdim // scalars. 
4932219077Sdim if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 4933193323Sed ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 4934193323Sed TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) { 4935193323Sed bool DoXform = true; 4936193323Sed SmallVector<SDNode*, 4> SetCCs; 4937193323Sed if (!N0.hasOneUse()) 4938193323Sed DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI); 4939193323Sed if (DoXform) { 4940193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4941218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT, 4942193323Sed LN0->getChain(), 4943218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 4944193323Sed N0.getValueType(), 4945203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 4946203954Srdivacky LN0->getAlignment()); 4947193323Sed CombineTo(N, ExtLoad); 4948193323Sed SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4949193323Sed N0.getValueType(), ExtLoad); 4950193323Sed CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 4951224145Sdim ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4952224145Sdim ISD::ANY_EXTEND); 4953193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 
4954193323Sed } 4955193323Sed } 4956193323Sed 4957193323Sed // fold (aext (zextload x)) -> (aext (truncate (zextload x))) 4958193323Sed // fold (aext (sextload x)) -> (aext (truncate (sextload x))) 4959193323Sed // fold (aext ( extload x)) -> (aext (truncate (extload x))) 4960193323Sed if (N0.getOpcode() == ISD::LOAD && 4961193323Sed !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 4962193323Sed N0.hasOneUse()) { 4963193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4964198090Srdivacky EVT MemVT = LN0->getMemoryVT(); 4965218893Sdim SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(), 4966218893Sdim VT, LN0->getChain(), LN0->getBasePtr(), 4967218893Sdim LN0->getPointerInfo(), MemVT, 4968203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 4969203954Srdivacky LN0->getAlignment()); 4970193323Sed CombineTo(N, ExtLoad); 4971193323Sed CombineTo(N0.getNode(), 4972193323Sed DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4973193323Sed N0.getValueType(), ExtLoad), 4974193323Sed ExtLoad.getValue(1)); 4975193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 4976193323Sed } 4977193323Sed 4978193323Sed if (N0.getOpcode() == ISD::SETCC) { 4979208599Srdivacky // aext(setcc) -> sext_in_reg(vsetcc) for vectors. 4980208599Srdivacky // Only do this before legalize for now. 4981208599Srdivacky if (VT.isVector() && !LegalOperations) { 4982208599Srdivacky EVT N0VT = N0.getOperand(0).getValueType(); 4983208599Srdivacky // We know that the # elements of the results is the same as the 4984208599Srdivacky // # elements of the compare (and the # elements of the compare result 4985208599Srdivacky // for that matter). Check to see that they are the same size. If so, 4986208599Srdivacky // we know that the element size of the sext'd result matches the 4987208599Srdivacky // element size of the compare operands. 
4988208599Srdivacky if (VT.getSizeInBits() == N0VT.getSizeInBits()) 4989226633Sdim return DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0), 4990210299Sed N0.getOperand(1), 4991210299Sed cast<CondCodeSDNode>(N0.getOperand(2))->get()); 4992208599Srdivacky // If the desired elements are smaller or larger than the source 4993208599Srdivacky // elements we can use a matching integer vector type and then 4994208599Srdivacky // truncate/sign extend 4995208599Srdivacky else { 4996210299Sed EVT MatchingElementType = 4997210299Sed EVT::getIntegerVT(*DAG.getContext(), 4998210299Sed N0VT.getScalarType().getSizeInBits()); 4999210299Sed EVT MatchingVectorType = 5000210299Sed EVT::getVectorVT(*DAG.getContext(), MatchingElementType, 5001210299Sed N0VT.getVectorNumElements()); 5002210299Sed SDValue VsetCC = 5003226633Sdim DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0), 5004210299Sed N0.getOperand(1), 5005210299Sed cast<CondCodeSDNode>(N0.getOperand(2))->get()); 5006210299Sed return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT); 5007208599Srdivacky } 5008208599Srdivacky } 5009208599Srdivacky 5010208599Srdivacky // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 5011193323Sed SDValue SCC = 5012193323Sed SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1), 5013193323Sed DAG.getConstant(1, VT), DAG.getConstant(0, VT), 5014193323Sed cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 5015193323Sed if (SCC.getNode()) 5016193323Sed return SCC; 5017193323Sed } 5018193323Sed 5019193323Sed return SDValue(); 5020193323Sed} 5021193323Sed 5022193323Sed/// GetDemandedBits - See if the specified operand can be simplified with the 5023193323Sed/// knowledge that only the bits specified by Mask are used. If so, return the 5024193323Sed/// simpler operand, otherwise return a null SDValue. 
5025193323SedSDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) { 5026193323Sed switch (V.getOpcode()) { 5027193323Sed default: break; 5028234353Sdim case ISD::Constant: { 5029234353Sdim const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 5030234353Sdim assert(CV != 0 && "Const value should be ConstSDNode."); 5031234353Sdim const APInt &CVal = CV->getAPIntValue(); 5032234353Sdim APInt NewVal = CVal & Mask; 5033234353Sdim if (NewVal != CVal) { 5034234353Sdim return DAG.getConstant(NewVal, V.getValueType()); 5035234353Sdim } 5036234353Sdim break; 5037234353Sdim } 5038193323Sed case ISD::OR: 5039193323Sed case ISD::XOR: 5040193323Sed // If the LHS or RHS don't contribute bits to the or, drop them. 5041193323Sed if (DAG.MaskedValueIsZero(V.getOperand(0), Mask)) 5042193323Sed return V.getOperand(1); 5043193323Sed if (DAG.MaskedValueIsZero(V.getOperand(1), Mask)) 5044193323Sed return V.getOperand(0); 5045193323Sed break; 5046193323Sed case ISD::SRL: 5047193323Sed // Only look at single-use SRLs. 5048193323Sed if (!V.getNode()->hasOneUse()) 5049193323Sed break; 5050193323Sed if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 5051193323Sed // See if we can recursively simplify the LHS. 5052193323Sed unsigned Amt = RHSC->getZExtValue(); 5053193323Sed 5054193323Sed // Watch out for shift count overflow though. 
5055193323Sed if (Amt >= Mask.getBitWidth()) break; 5056193323Sed APInt NewMask = Mask << Amt; 5057193323Sed SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask); 5058193323Sed if (SimplifyLHS.getNode()) 5059193323Sed return DAG.getNode(ISD::SRL, V.getDebugLoc(), V.getValueType(), 5060193323Sed SimplifyLHS, V.getOperand(1)); 5061193323Sed } 5062193323Sed } 5063193323Sed return SDValue(); 5064193323Sed} 5065193323Sed 5066193323Sed/// ReduceLoadWidth - If the result of a wider load is shifted to right of N 5067193323Sed/// bits and then truncated to a narrower type and where N is a multiple 5068193323Sed/// of number of bits of the narrower type, transform it to a narrower load 5069193323Sed/// from address + N / num of bits of new type. If the result is to be 5070193323Sed/// extended, also fold the extension to form a extending load. 5071193323SedSDValue DAGCombiner::ReduceLoadWidth(SDNode *N) { 5072193323Sed unsigned Opc = N->getOpcode(); 5073210299Sed 5074193323Sed ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 5075193323Sed SDValue N0 = N->getOperand(0); 5076198090Srdivacky EVT VT = N->getValueType(0); 5077198090Srdivacky EVT ExtVT = VT; 5078193323Sed 5079193323Sed // This transformation isn't valid for vector loads. 5080193323Sed if (VT.isVector()) 5081193323Sed return SDValue(); 5082193323Sed 5083202375Srdivacky // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then 5084193323Sed // extended to VT. 5085193323Sed if (Opc == ISD::SIGN_EXTEND_INREG) { 5086193323Sed ExtType = ISD::SEXTLOAD; 5087198090Srdivacky ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 5088210299Sed } else if (Opc == ISD::SRL) { 5089218893Sdim // Another special-case: SRL is basically zero-extending a narrower value. 
5090210299Sed ExtType = ISD::ZEXTLOAD; 5091210299Sed N0 = SDValue(N, 0); 5092210299Sed ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5093210299Sed if (!N01) return SDValue(); 5094210299Sed ExtVT = EVT::getIntegerVT(*DAG.getContext(), 5095210299Sed VT.getSizeInBits() - N01->getZExtValue()); 5096193323Sed } 5097218893Sdim if (LegalOperations && !TLI.isLoadExtLegal(ExtType, ExtVT)) 5098218893Sdim return SDValue(); 5099193323Sed 5100198090Srdivacky unsigned EVTBits = ExtVT.getSizeInBits(); 5101219077Sdim 5102218893Sdim // Do not generate loads of non-round integer types since these can 5103218893Sdim // be expensive (and would be wrong if the type is not byte sized). 5104218893Sdim if (!ExtVT.isRound()) 5105218893Sdim return SDValue(); 5106219077Sdim 5107193323Sed unsigned ShAmt = 0; 5108218893Sdim if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { 5109193323Sed if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 5110193323Sed ShAmt = N01->getZExtValue(); 5111193323Sed // Is the shift amount a multiple of size of VT? 5112193323Sed if ((ShAmt & (EVTBits-1)) == 0) { 5113193323Sed N0 = N0.getOperand(0); 5114198090Srdivacky // Is the load width a multiple of size of VT? 5115198090Srdivacky if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0) 5116193323Sed return SDValue(); 5117193323Sed } 5118218893Sdim 5119218893Sdim // At this point, we must have a load or else we can't do the transform. 5120218893Sdim if (!isa<LoadSDNode>(N0)) return SDValue(); 5121219077Sdim 5122249423Sdim // Because a SRL must be assumed to *need* to zero-extend the high bits 5123249423Sdim // (as opposed to anyext the high bits), we can't combine the zextload 5124249423Sdim // lowering of SRL and an sextload. 
5125249423Sdim if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD) 5126249423Sdim return SDValue(); 5127249423Sdim 5128218893Sdim // If the shift amount is larger than the input type then we're not 5129218893Sdim // accessing any of the loaded bytes. If the load was a zextload/extload 5130218893Sdim // then the result of the shift+trunc is zero/undef (handled elsewhere). 5131218893Sdim if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits()) 5132218893Sdim return SDValue(); 5133193323Sed } 5134193323Sed } 5135193323Sed 5136218893Sdim // If the load is shifted left (and the result isn't shifted back right), 5137218893Sdim // we can fold the truncate through the shift. 5138218893Sdim unsigned ShLeftAmt = 0; 5139218893Sdim if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 5140218893Sdim ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) { 5141218893Sdim if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 5142218893Sdim ShLeftAmt = N01->getZExtValue(); 5143218893Sdim N0 = N0.getOperand(0); 5144193323Sed } 5145218893Sdim } 5146219077Sdim 5147218893Sdim // If we haven't found a load, we can't narrow it. Don't transform one with 5148218893Sdim // multiple uses, this would require adding a new load. 5149249423Sdim if (!isa<LoadSDNode>(N0) || !N0.hasOneUse()) 5150218893Sdim return SDValue(); 5151219077Sdim 5152249423Sdim // Don't change the width of a volatile load. 5153249423Sdim LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5154249423Sdim if (LN0->isVolatile()) 5155249423Sdim return SDValue(); 5156249423Sdim 5157218893Sdim // Verify that we are actually reducing a load width here. 5158249423Sdim if (LN0->getMemoryVT().getSizeInBits() < EVTBits) 5159218893Sdim return SDValue(); 5160219077Sdim 5161249423Sdim // For the transform to be legal, the load must produce only two values 5162249423Sdim // (the value loaded and the chain). 
Don't transform a pre-increment 5163249423Sdim // load, for example, which produces an extra value. Otherwise the 5164249423Sdim // transformation is not equivalent, and the downstream logic to replace 5165249423Sdim // uses gets things wrong. 5166249423Sdim if (LN0->getNumValues() > 2) 5167249423Sdim return SDValue(); 5168249423Sdim 5169218893Sdim EVT PtrType = N0.getOperand(1).getValueType(); 5170193323Sed 5171239462Sdim if (PtrType == MVT::Untyped || PtrType.isExtended()) 5172239462Sdim // It's not possible to generate a constant of extended or untyped type. 5173239462Sdim return SDValue(); 5174239462Sdim 5175218893Sdim // For big endian targets, we need to adjust the offset to the pointer to 5176218893Sdim // load the correct bytes. 5177218893Sdim if (TLI.isBigEndian()) { 5178218893Sdim unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); 5179218893Sdim unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); 5180218893Sdim ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; 5181218893Sdim } 5182193323Sed 5183218893Sdim uint64_t PtrOff = ShAmt / 8; 5184218893Sdim unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); 5185218893Sdim SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), 5186218893Sdim PtrType, LN0->getBasePtr(), 5187218893Sdim DAG.getConstant(PtrOff, PtrType)); 5188218893Sdim AddToWorkList(NewPtr.getNode()); 5189193323Sed 5190218893Sdim SDValue Load; 5191218893Sdim if (ExtType == ISD::NON_EXTLOAD) 5192218893Sdim Load = DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr, 5193218893Sdim LN0->getPointerInfo().getWithOffset(PtrOff), 5194234353Sdim LN0->isVolatile(), LN0->isNonTemporal(), 5195234353Sdim LN0->isInvariant(), NewAlign); 5196218893Sdim else 5197218893Sdim Load = DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(),NewPtr, 5198218893Sdim LN0->getPointerInfo().getWithOffset(PtrOff), 5199218893Sdim ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 5200218893Sdim NewAlign); 5201193323Sed 5202218893Sdim // Replace the 
old load's chain with the new load's chain. 5203218893Sdim WorkListRemover DeadNodes(*this); 5204239462Sdim DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 5205218893Sdim 5206218893Sdim // Shift the result left, if we've swallowed a left shift. 5207218893Sdim SDValue Result = Load; 5208218893Sdim if (ShLeftAmt != 0) { 5209219077Sdim EVT ShImmTy = getShiftAmountTy(Result.getValueType()); 5210218893Sdim if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt)) 5211218893Sdim ShImmTy = VT; 5212249423Sdim // If the shift amount is as large as the result size (but, presumably, 5213249423Sdim // no larger than the source) then the useful bits of the result are 5214249423Sdim // zero; we can't simply return the shortened shift, because the result 5215249423Sdim // of that operation is undefined. 5216249423Sdim if (ShLeftAmt >= VT.getSizeInBits()) 5217249423Sdim Result = DAG.getConstant(0, VT); 5218249423Sdim else 5219249423Sdim Result = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, 5220249423Sdim Result, DAG.getConstant(ShLeftAmt, ShImmTy)); 5221193323Sed } 5222193323Sed 5223218893Sdim // Return the new loaded value. 5224218893Sdim return Result; 5225193323Sed} 5226193323Sed 5227193323SedSDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { 5228193323Sed SDValue N0 = N->getOperand(0); 5229193323Sed SDValue N1 = N->getOperand(1); 5230198090Srdivacky EVT VT = N->getValueType(0); 5231198090Srdivacky EVT EVT = cast<VTSDNode>(N1)->getVT(); 5232200581Srdivacky unsigned VTBits = VT.getScalarType().getSizeInBits(); 5233202375Srdivacky unsigned EVTBits = EVT.getScalarType().getSizeInBits(); 5234193323Sed 5235193323Sed // fold (sext_in_reg c1) -> c1 5236193323Sed if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF) 5237193323Sed return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, N0, N1); 5238193323Sed 5239193323Sed // If the input is already sign extended, just drop the extension. 
5240200581Srdivacky if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1) 5241193323Sed return N0; 5242193323Sed 5243193323Sed // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2 5244193323Sed if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 5245193323Sed EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) { 5246193323Sed return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, 5247193323Sed N0.getOperand(0), N1); 5248193323Sed } 5249193323Sed 5250193323Sed // fold (sext_in_reg (sext x)) -> (sext x) 5251193323Sed // fold (sext_in_reg (aext x)) -> (sext x) 5252193323Sed // if x is small enough. 5253193323Sed if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) { 5254193323Sed SDValue N00 = N0.getOperand(0); 5255207618Srdivacky if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits && 5256207618Srdivacky (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 5257193323Sed return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N00, N1); 5258193323Sed } 5259193323Sed 5260193323Sed // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero. 5261193323Sed if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits))) 5262193323Sed return DAG.getZeroExtendInReg(N0, N->getDebugLoc(), EVT); 5263193323Sed 5264193323Sed // fold operands of sext_in_reg based on knowledge that the top bits are not 5265193323Sed // demanded. 5266193323Sed if (SimplifyDemandedBits(SDValue(N, 0))) 5267193323Sed return SDValue(N, 0); 5268193323Sed 5269193323Sed // fold (sext_in_reg (load x)) -> (smaller sextload x) 5270193323Sed // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits)) 5271193323Sed SDValue NarrowLoad = ReduceLoadWidth(N); 5272193323Sed if (NarrowLoad.getNode()) 5273193323Sed return NarrowLoad; 5274193323Sed 5275193323Sed // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24) 5276193323Sed // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible. 
5277193323Sed // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. 5278193323Sed if (N0.getOpcode() == ISD::SRL) { 5279193323Sed if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 5280200581Srdivacky if (ShAmt->getZExtValue()+EVTBits <= VTBits) { 5281193323Sed // We can turn this into an SRA iff the input to the SRL is already sign 5282193323Sed // extended enough. 5283193323Sed unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0)); 5284200581Srdivacky if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits) 5285193323Sed return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, 5286193323Sed N0.getOperand(0), N0.getOperand(1)); 5287193323Sed } 5288193323Sed } 5289193323Sed 5290193323Sed // fold (sext_inreg (extload x)) -> (sextload x) 5291193323Sed if (ISD::isEXTLoad(N0.getNode()) && 5292193323Sed ISD::isUNINDEXEDLoad(N0.getNode()) && 5293193323Sed EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 5294193323Sed ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 5295193323Sed TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) { 5296193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5297218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT, 5298193323Sed LN0->getChain(), 5299218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 5300218893Sdim EVT, 5301203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 5302203954Srdivacky LN0->getAlignment()); 5303193323Sed CombineTo(N, ExtLoad); 5304193323Sed CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 5305249423Sdim AddToWorkList(ExtLoad.getNode()); 5306193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 
  }
  // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
  // (the zero-extending load is replaced wholesale, so it must not have
  // other users that still expect zero-extension).
  if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
      N0.hasOneUse() &&
      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), LN0->getPointerInfo(),
                                     EVT,
                                     LN0->isVolatile(), LN0->isNonTemporal(),
                                     LN0->getAlignment());
    // Replace both the sext_inreg and the original load (value + chain).
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
  if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
    SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
                                       N0.getOperand(1), false);
    if (BSwap.getNode() != 0)
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
                         BSwap, N1);
  }

  return SDValue();
}

/// visitTRUNCATE - Simplify an ISD::TRUNCATE node.  Returns the replacement
/// value, or an empty SDValue when no fold applies.  The folds are attempted
/// in priority order; most are gated on the current combine level via
/// LegalTypes / LegalOperations.
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool isLE = TLI.isLittleEndian();

  // noop truncate
  if (N0.getValueType() == N->getValueType(0))
    return N0;
  // fold (truncate c1) -> c1 (getNode constant-folds TRUNCATE of a constant).
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0);
  // fold (truncate (truncate x)) -> (truncate x)
  if (N0.getOpcode() == ISD::TRUNCATE)
    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
  // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
  if (N0.getOpcode() == ISD::ZERO_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND ||
      N0.getOpcode() == ISD::ANY_EXTEND) {
    if (N0.getOperand(0).getValueType().bitsLT(VT))
      // if the source is smaller than the dest, we still need an extend
      return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
                         N0.getOperand(0));
    if (N0.getOperand(0).getValueType().bitsGT(VT))
      // if the source is larger than the dest, then we just need the truncate
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
    // if the source and dest are the same type, we can drop both the extend
    // and the truncate.
    return N0.getOperand(0);
  }

  // Fold extract-and-trunc into a narrow extract. For example:
  //   i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
  //   i32 y = TRUNCATE(i64 x)
  //        -- becomes --
  //   v16i8 b = BITCAST (v2i64 val)
  //   i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
  //
  // Note: We only run this optimization after type legalization (which often
  // creates this pattern) and before operation legalization after which
  // we need to be more careful about the vector instructions that we generate.
  if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      LegalTypes && !LegalOperations && N0->hasOneUse()) {

    EVT VecTy  = N0.getOperand(0).getValueType();
    EVT ExTy   = N0.getValueType();
    EVT TrTy   = N->getValueType(0);

    unsigned NumElem = VecTy.getVectorNumElements();
    // How many truncated-width lanes fit in one original extracted element.
    unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();

    // Reinterpret the whole source vector as a vector of the narrow type.
    EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
    assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");

    SDValue EltNo = N0->getOperand(1);
    if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
      int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
      EVT IndexTy = N0->getOperand(1).getValueType();
      // Pick the narrow sub-element that holds the low-order bits of the
      // original element: the first sub-element on little-endian, the last
      // one on big-endian.
      int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));

      SDValue V = DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
                              NVT, N0.getOperand(0));

      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
                         N->getDebugLoc(), TrTy, V,
                         DAG.getConstant(Index, IndexTy));
    }
  }

  // Fold a series of buildvector, bitcast, and truncate if possible.
  // For example fold
  //   (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
  //   (2xi32 (buildvector x, y)).
  if (Level == AfterLegalizeVectorOps && VT.isVector() &&
      N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() &&
      N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
      N0.getOperand(0).hasOneUse()) {

    SDValue BuildVect = N0.getOperand(0);
    EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType();
    EVT TruncVecEltTy = VT.getVectorElementType();

    // Check that the element types match.
    if (BuildVectEltTy == TruncVecEltTy) {
      // Now we only need to compute the offset of the truncated elements.
      unsigned BuildVecNumElts =  BuildVect.getNumOperands();
      unsigned TruncVecNumElts = VT.getVectorNumElements();
      unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts;

      assert((BuildVecNumElts % TruncVecNumElts) == 0 &&
             "Invalid number of elements");

      // Keep every TruncEltOffset'th operand of the original build_vector;
      // the ones skipped are the high parts discarded by the truncate.
      SmallVector<SDValue, 8> Opnds;
      for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset)
        Opnds.push_back(BuildVect.getOperand(i));

      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT, &Opnds[0],
                         Opnds.size());
    }
  }

  // See if we can simplify the input to this truncate through knowledge that
  // only the low bits are being used.
  // For example "trunc (or (shl x, 8), y)" -> trunc y
  // Currently we only perform this optimization on scalars because vectors
  // may have different active low bits.
  if (!VT.isVector()) {
    SDValue Shorter =
      GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
                                               VT.getSizeInBits()));
    if (Shorter.getNode())
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Shorter);
  }
  // fold (truncate (load x)) -> (smaller load x)
  // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
  if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
    SDValue Reduced = ReduceLoadWidth(N);
    if (Reduced.getNode())
      return Reduced;
  }
  // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)),
  // where ... are all 'undef'.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) {
    SmallVector<EVT, 8> VTs;
    SDValue V;          // The single non-undef operand, if any.
    unsigned Idx = 0;   // Its operand index.
    unsigned NumDefs = 0;

    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
      SDValue X = N0.getOperand(i);
      if (X.getOpcode() != ISD::UNDEF) {
        V = X;
        Idx = i;
        NumDefs++;
      }
      // Stop if more than one members are non-undef.
      if (NumDefs > 1)
        break;
      // Record the truncated type of each concat operand (same element
      // count, narrower element type).
      VTs.push_back(EVT::getVectorVT(*DAG.getContext(),
                                     VT.getVectorElementType(),
                                     X.getValueType().getVectorNumElements()));
    }

    if (NumDefs == 0)
      return DAG.getUNDEF(VT);

    if (NumDefs == 1) {
      assert(V.getNode() && "The single defined operand is empty!");
      SmallVector<SDValue, 8> Opnds;
      for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
        if (i != Idx) {
          Opnds.push_back(DAG.getUNDEF(VTs[i]));
          continue;
        }
        SDValue NV = DAG.getNode(ISD::TRUNCATE, V.getDebugLoc(), VTs[i], V);
        AddToWorkList(NV.getNode());
        Opnds.push_back(NV);
      }
      return DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT,
                         &Opnds[0], Opnds.size());
    }
  }

  // Simplify the operands using demanded-bits information.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// getBuildPairElt - Return the node feeding element i of a BUILD_PAIR,
/// looking through an intervening MERGE_VALUES node.
static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
  SDValue Elt = N->getOperand(i);
  if (Elt.getOpcode() != ISD::MERGE_VALUES)
    return Elt.getNode();
  return Elt.getOperand(Elt.getResNo()).getNode();
}

/// CombineConsecutiveLoads - build_pair (load, load) -> load
/// if load locations are consecutive.
SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
  assert(N->getOpcode() == ISD::BUILD_PAIR);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
  LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
  // Both halves must be plain (non-extending) loads in the same address
  // space, and LD1 must have no other users since it is replaced entirely.
  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
      LD1->getPointerInfo().getAddrSpace() !=
         LD2->getPointerInfo().getAddrSpace())
    return SDValue();
  EVT LD1VT = LD1->getValueType(0);

  if (ISD::isNON_EXTLoad(LD2) &&
      LD2->hasOneUse() &&
      // If both are volatile this would reduce the number of volatile loads.
      // If one is volatile it might be ok, but play conservative and bail out.
      !LD1->isVolatile() &&
      !LD2->isVolatile() &&
      // LD2 must sit exactly one LD1-sized element after LD1 in memory.
      DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
    unsigned Align = LD1->getAlignment();
    unsigned NewAlign = TLI.getDataLayout()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));

    // Only combine if the wide type's ABI alignment is not stricter than
    // what LD1 already guarantees, and the wide load is legal (post-legalize).
    if (NewAlign <= Align &&
        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
      return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
                         LD1->getBasePtr(), LD1->getPointerInfo(),
                         false, false, false, Align);
  }

  return SDValue();
}

/// visitBITCAST - Simplify an ISD::BITCAST node.  Returns the replacement
/// value, or an empty SDValue when no fold applies.
SDValue DAGCombiner::visitBITCAST(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
  // Only do this before legalize, since afterward the target may be depending
  // on the bitconvert.
  // First check to see if this is all constant.
  if (!LegalTypes &&
      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
      VT.isVector()) {
    bool isSimple = true;
    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
      if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
          N0.getOperand(i).getOpcode() != ISD::Constant &&
          N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
        isSimple = false;
        break;
      }

    EVT DestEltVT = N->getValueType(0).getVectorElementType();
    assert(!DestEltVT.isVector() &&
           "Element type of vector ValueType must not be vector!");
    if (isSimple)
      return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
  }

  // If the input is a constant, let getNode fold it.
  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
    SDValue Res = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, N0);
    if (Res.getNode() != N) {
      if (!LegalOperations ||
          TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
        return Res;

      // Folding it resulted in an illegal node, and it's too late to
      // do that. Clean up the old node and forego the transformation.
      // Ideally this won't happen very often, because instcombine
      // and the earlier dagcombine runs (where illegal nodes are
      // permitted) should have folded most of them already.
      DAG.DeleteNode(Res.getNode());
    }
  }

  // (conv (conv x, t1), t2) -> (conv x, t2)
  if (N0.getOpcode() == ISD::BITCAST)
    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT,
                       N0.getOperand(0));

  // fold (conv (load x)) -> (load (conv*)x)
  // If the resultant load doesn't need a higher alignment than the original!
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      // Do not change the width of a volatile load.
      !cast<LoadSDNode>(N0)->isVolatile() &&
      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    unsigned Align = TLI.getDataLayout()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
    unsigned OrigAlign = LN0->getAlignment();

    if (Align <= OrigAlign) {
      SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
                                 LN0->getBasePtr(), LN0->getPointerInfo(),
                                 LN0->isVolatile(), LN0->isNonTemporal(),
                                 LN0->isInvariant(), OrigAlign);
      AddToWorkList(N);
      // Other users of the old load get the new load bitcast back to the
      // old type; the chain result is threaded through as well.
      CombineTo(N0.getNode(),
                DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
                            N0.getValueType(), Load),
                Load.getValue(1));
      return Load;
    }
  }

  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
  // This often reduces constant pool loads.
  // Skipped when the target does FNEG/FABS for free, and for vectors.
  if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) ||
       (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) &&
      N0.getNode()->hasOneUse() && VT.isInteger() &&
      !VT.isVector() && !N0.getValueType().isVector()) {
    SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
                                  N0.getOperand(0));
    AddToWorkList(NewConv.getNode());

    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
    if (N0.getOpcode() == ISD::FNEG)
      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
                         NewConv, DAG.getConstant(SignBit, VT));
    assert(N0.getOpcode() == ISD::FABS);
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                       NewConv, DAG.getConstant(~SignBit, VT));
  }

  // fold (bitconvert (fcopysign cst, x)) ->
  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
  // Note that we don't handle (copysign x, cst) because this can always be
  // folded to an fneg or fabs.
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
      VT.isInteger() && !VT.isVector()) {
    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
    EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
    if (isTypeLegal(IntXVT)) {
      SDValue X = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
                              IntXVT, N0.getOperand(1));
      AddToWorkList(X.getNode());

      // If X has a different width than the result/lhs, sext it or truncate it.
      unsigned VTWidth = VT.getSizeInBits();
      if (OrigXWidth < VTWidth) {
        X = DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, X);
        AddToWorkList(X.getNode());
      } else if (OrigXWidth > VTWidth) {
        // To get the sign bit in the right place, we have to shift it right
        // before truncating.
        X = DAG.getNode(ISD::SRL, X.getDebugLoc(),
                        X.getValueType(), X,
                        DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
        AddToWorkList(X.getNode());
        X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
        AddToWorkList(X.getNode());
      }

      // Keep only the sign bit of x, and everything but the sign bit of cst,
      // then OR them together.
      APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
      X = DAG.getNode(ISD::AND, X.getDebugLoc(), VT,
                      X, DAG.getConstant(SignBit, VT));
      AddToWorkList(X.getNode());

      SDValue Cst = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
                                VT, N0.getOperand(0));
      Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
                        Cst, DAG.getConstant(~SignBit, VT));
      AddToWorkList(Cst.getNode());

      return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, X, Cst);
    }
  }

  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
  if (N0.getOpcode() == ISD::BUILD_PAIR) {
    SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
    if (CombineLD.getNode())
      return CombineLD;
  }

  return SDValue();
}

/// visitBUILD_PAIR - The only BUILD_PAIR combine is merging two consecutive
/// loads into a single wider load.
SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
  EVT VT = N->getValueType(0);
  return CombineConsecutiveLoads(N, VT);
}

/// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
/// node with Constant, ConstantFP or Undef operands.  DstEltVT indicates the
/// destination element value type.
/// FP source/destination element types are handled by recursing through an
/// integer type of the same width, so the growing/shrinking logic below only
/// ever sees integer elements.
SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
  EVT SrcEltVT = BV->getValueType(0).getVectorElementType();

  // If this is already the right type, we're done.
  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);

  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
  unsigned DstBitSize = DstEltVT.getSizeInBits();

  // If this is a conversion of N elements of one type to N elements of another
  // type, convert each element.  This handles FP<->INT cases.
  if (SrcBitSize == DstBitSize) {
    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                              BV->getValueType(0).getVectorNumElements());

    // Due to the FP element handling below calling this routine recursively,
    // we can end up with a scalar-to-vector node here.
    if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
                         DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
                                     DstEltVT, BV->getOperand(0)));

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
      SDValue Op = BV->getOperand(i);
      // If the vector element type is not legal, the BUILD_VECTOR operands
      // are promoted and implicitly truncated.  Make that explicit here.
      if (Op.getValueType() != SrcEltVT)
        Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
      Ops.push_back(DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
                                DstEltVT, Op));
      AddToWorkList(Ops.back().getNode());
    }
    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
                       &Ops[0], Ops.size());
  }

  // Otherwise, we're growing or shrinking the elements.  To avoid having to
  // handle annoying details of growing/shrinking FP values, we convert them to
  // int first.
  if (SrcEltVT.isFloatingPoint()) {
    // Convert the input float vector to a int vector where the elements are the
    // same sizes.
    assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
    BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
    SrcEltVT = IntVT;
  }

  // Now we know the input is an integer vector.  If the output is a FP type,
  // convert to integer first, then to FP of the right size.
  if (DstEltVT.isFloatingPoint()) {
    assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
    EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
    SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();

    // Next, convert to FP elements of the same size.
    return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
  }

  // Okay, we know the src/dst types are both integers of differing types.
  // Handling growing first.
  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
  if (SrcBitSize < DstBitSize) {
    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
         i += NumInputsPerOutput) {
      bool isLE = TLI.isLittleEndian();
      APInt NewBits = APInt(DstBitSize, 0);
      bool EltIsUndef = true;
      // Pack NumInputsPerOutput source constants into one wide constant.
      // On little-endian the first source element supplies the low bits, so
      // the group is consumed in reverse while bits are shifted up.
      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
        // Shift the previously computed bits over.
        NewBits <<= SrcBitSize;
        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
        // An undef source element contributes zero bits; the result element
        // is only undef if every source element in the group is undef.
        if (Op.getOpcode() == ISD::UNDEF) continue;
        EltIsUndef = false;

        NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
                   zextOrTrunc(SrcBitSize).zext(DstBitSize);
      }

      if (EltIsUndef)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      else
        Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
    }

    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
                       &Ops[0], Ops.size());
  }

  // Finally, this must be the case where we are shrinking elements: each input
  // turns into multiple outputs.
  bool isS2V = ISD::isScalarToVector(BV);
  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
  EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                            NumOutputsPerInput*BV->getNumOperands());
  SmallVector<SDValue, 8> Ops;

  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
      for (unsigned j = 0; j != NumOutputsPerInput; ++j)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      continue;
    }

    APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
                  getAPIntValue().zextOrTrunc(SrcBitSize);

    // Peel DstBitSize chunks off the low end of the value, one per output.
    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
      APInt ThisVal = OpVal.trunc(DstBitSize);
      Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
      // If a scalar_to_vector's single value fits entirely in the first
      // narrow chunk (high chunks would all be zero), we can
      // Simply turn this into a SCALAR_TO_VECTOR of the new type.
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
                           Ops[0]);
      OpVal = OpVal.lshr(DstBitSize);
    }

    // For big endian targets, swap the order of the pieces of each element.
    if (TLI.isBigEndian())
      std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
  }

  return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
                     &Ops[0], Ops.size());
}

/// visitFADD - Simplify an ISD::FADD node.  Most algebraic folds here are
/// gated on TargetOptions.UnsafeFPMath because they change rounding/NaN
/// behavior.  Returns the replacement value or an empty SDValue.
SDValue DAGCombiner::visitFADD(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fadd c1, c2) -> c1 + c2 (getNode constant-folds two FP constants).
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
  // canonicalize constant to RHS
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0);
  // fold (fadd A, 0) -> A
  // (unsafe-math only: not valid for A == -0.0 under IEEE rules.)
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
      N1CFP->getValueAPF().isZero())
    return N0;
  // fold (fadd A, (fneg B)) -> (fsub A, B)
  // isNegatibleForFree == 2 means the negation is free without depth limits;
  // assumes that contract — TODO confirm against its definition.
  if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
    isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0,
                       GetNegatedExpression(N1, DAG, LegalOperations));
  // fold (fadd (fneg A), B) -> (fsub B, A)
  if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) &&
    isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2)
    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1,
                       GetNegatedExpression(N0, DAG, LegalOperations));

  // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
  if (DAG.getTarget().Options.UnsafeFPMath && N1CFP &&
      N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() &&
      isa<ConstantFPSDNode>(N0.getOperand(1)))
    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                   N0.getOperand(1), N1));

  // No FP constant should be created after legalization as Instruction
  // Selection pass has hard time in dealing with FP constant.
  //
  // We don't need test this condition for transformation like following, as
  // the DAG being transformed implies it is legal to take FP constant as
  // operand.
  //
  //  (fadd (fmul c, x), x) -> (fmul c+1, x)
  //
  bool AllowNewFpConst = (Level < AfterLegalizeDAG);

  // If allow, fold (fadd (fneg x), x) -> 0.0
  if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath &&
      N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) {
    return DAG.getConstantFP(0.0, VT);
  }

  // If allow, fold (fadd x, (fneg x)) -> 0.0
  if (AllowNewFpConst && DAG.getTarget().Options.UnsafeFPMath &&
      N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) {
    return DAG.getConstantFP(0.0, VT);
  }

  // In unsafe math mode, we can fold chains of FADD's of the same value
  // into multiplications.  This transform is not safe in general because
  // we are reducing the number of rounding steps.
  if (DAG.getTarget().Options.UnsafeFPMath &&
      TLI.isOperationLegalOrCustom(ISD::FMUL, VT) &&
      !N0CFP && !N1CFP) {
    if (N0.getOpcode() == ISD::FMUL) {
      ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
      ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));

      // (fadd (fmul c, x), x) -> (fmul c+1, x)
      if (CFP00 && !CFP01 && N0.getOperand(1) == N1) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP00, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N1, NewCFP);
      }

      // (fadd (fmul x, c), x) -> (fmul c+1, x)
      if (CFP01 && !CFP00 && N0.getOperand(0) == N1) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP01, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N1, NewCFP);
      }

      // (fadd (fmul c, x), (fadd x, x)) -> (fmul c+2, x)
      if (CFP00 && !CFP01 && N1.getOpcode() == ISD::FADD &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(1) == N1.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP00, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0.getOperand(1), NewCFP);
      }

      // (fadd (fmul x, c), (fadd x, x)) -> (fmul c+2, x)
      if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(0) == N1.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP01, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0.getOperand(0), NewCFP);
      }
    }

    if (N1.getOpcode() == ISD::FMUL) {
      ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
      ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1));

      // (fadd x, (fmul c, x)) -> (fmul c+1, x)
      if (CFP10 && !CFP11 && N1.getOperand(1) == N0) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP10, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0, NewCFP);
      }

      // (fadd x, (fmul x, c)) -> (fmul c+1, x)
      if (CFP11 && !CFP10 && N1.getOperand(0) == N0) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP11, 0),
                                     DAG.getConstantFP(1.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0, NewCFP);
      }


      // (fadd (fadd x, x), (fmul c, x)) -> (fmul c+2, x)
      // NOTE(review): this branch is unreachable -- we are inside
      // "N1.getOpcode() == ISD::FMUL" yet it re-tests
      // "N1.getOpcode() == ISD::FADD", which can never hold.  The intended
      // pattern presumably meant to test N0 for the (fadd x, x) half;
      // verify against upstream before changing.
      if (CFP10 && !CFP11 && N1.getOpcode() == ISD::FADD &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(1) == N1.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP10, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0.getOperand(1), NewCFP);
      }

      // (fadd (fadd x, x), (fmul x, c)) -> (fmul c+2, x)
      // NOTE(review): unreachable for the same reason as the branch above.
      if (CFP11 && !CFP10 && N1.getOpcode() == ISD::FADD &&
          N1.getOperand(0) == N1.getOperand(1) &&
          N0.getOperand(0) == N1.getOperand(0)) {
        SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT,
                                     SDValue(CFP11, 0),
                                     DAG.getConstantFP(2.0, VT));
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0.getOperand(0), NewCFP);
      }
    }

    if (N0.getOpcode() == ISD::FADD && AllowNewFpConst) {
      ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N0.getOperand(0));
      // (fadd (fadd x, x), x) -> (fmul 3.0, x)
      if (!CFP && N0.getOperand(0) == N0.getOperand(1) &&
          (N0.getOperand(0) == N1)) {
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N1, DAG.getConstantFP(3.0, VT));
      }
    }

    if (N1.getOpcode() == ISD::FADD && AllowNewFpConst) {
      ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0));
      // (fadd x, (fadd x, x)) -> (fmul 3.0, x)
      if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) &&
          N1.getOperand(0) == N0) {
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           N0, DAG.getConstantFP(3.0, VT));
      }
    }

    // (fadd (fadd x, x), (fadd x, x)) -> (fmul 4.0, x)
    if (AllowNewFpConst &&
        N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD &&
        N0.getOperand(0) == N0.getOperand(1) &&
        N1.getOperand(0) == N1.getOperand(1) &&
        N0.getOperand(0) == N1.getOperand(0)) {
      return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                         N0.getOperand(0),
                         DAG.getConstantFP(4.0, VT));
    }
  }

  // FADD -> FMA combines:
  if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
       DAG.getTarget().Options.UnsafeFPMath) &&
      DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) &&
      TLI.isOperationLegalOrCustom(ISD::FMA, VT)) {

    // fold (fadd (fmul x, y), z) -> (fma x, y, z)
    if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) {
      return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT,
                         N0.getOperand(0), N0.getOperand(1), N1);
    }

    // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
    // Note: Commutes FADD operands.
    if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) {
      return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT,
                         N1.getOperand(0), N1.getOperand(1), N0);
    }
  }

  return SDValue();
}

/// visitFSUB - Simplify an ISD::FSUB node.  As with visitFADD, the
/// identity-style folds are gated on UnsafeFPMath.
SDValue DAGCombiner::visitFSUB(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fsub c1, c2) -> c1-c2 (getNode constant-folds two FP constants).
  if (N0CFP && N1CFP)
    return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
  // fold (fsub A, 0) -> A
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N1CFP && N1CFP->getValueAPF().isZero())
    return N0;
  // fold (fsub 0, B) -> -B
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N0CFP && N0CFP->getValueAPF().isZero()) {
    if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
      return GetNegatedExpression(N1, DAG, LegalOperations);
    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
      return DAG.getNode(ISD::FNEG, dl, VT, N1);
  }
  // fold (fsub A, (fneg B)) -> (fadd A, B)
  if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options))
    return DAG.getNode(ISD::FADD, dl, VT, N0,
                       GetNegatedExpression(N1, DAG, LegalOperations));
6089234353Sdim // If 'unsafe math' is enabled, fold 6090239462Sdim // (fsub x, x) -> 0.0 & 6091234353Sdim // (fsub x, (fadd x, y)) -> (fneg y) & 6092234353Sdim // (fsub x, (fadd y, x)) -> (fneg y) 6093234353Sdim if (DAG.getTarget().Options.UnsafeFPMath) { 6094239462Sdim if (N0 == N1) 6095239462Sdim return DAG.getConstantFP(0.0f, VT); 6096239462Sdim 6097234353Sdim if (N1.getOpcode() == ISD::FADD) { 6098234353Sdim SDValue N10 = N1->getOperand(0); 6099234353Sdim SDValue N11 = N1->getOperand(1); 6100234353Sdim 6101234353Sdim if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, 6102234353Sdim &DAG.getTarget().Options)) 6103234353Sdim return GetNegatedExpression(N11, DAG, LegalOperations); 6104234353Sdim else if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, 6105234353Sdim &DAG.getTarget().Options)) 6106234353Sdim return GetNegatedExpression(N10, DAG, LegalOperations); 6107234353Sdim } 6108234353Sdim } 6109234353Sdim 6110239462Sdim // FSUB -> FMA combines: 6111239462Sdim if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast || 6112239462Sdim DAG.getTarget().Options.UnsafeFPMath) && 6113239462Sdim DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) && 6114239462Sdim TLI.isOperationLegalOrCustom(ISD::FMA, VT)) { 6115239462Sdim 6116239462Sdim // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z)) 6117239462Sdim if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) { 6118239462Sdim return DAG.getNode(ISD::FMA, dl, VT, 6119239462Sdim N0.getOperand(0), N0.getOperand(1), 6120239462Sdim DAG.getNode(ISD::FNEG, dl, VT, N1)); 6121239462Sdim } 6122239462Sdim 6123239462Sdim // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x) 6124239462Sdim // Note: Commutes FSUB operands. 
6125239462Sdim if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) { 6126239462Sdim return DAG.getNode(ISD::FMA, dl, VT, 6127239462Sdim DAG.getNode(ISD::FNEG, dl, VT, 6128239462Sdim N1.getOperand(0)), 6129239462Sdim N1.getOperand(1), N0); 6130239462Sdim } 6131239462Sdim 6132239462Sdim // fold (fsub (-(fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) 6133239462Sdim if (N0.getOpcode() == ISD::FNEG && 6134239462Sdim N0.getOperand(0).getOpcode() == ISD::FMUL && 6135239462Sdim N0->hasOneUse() && N0.getOperand(0).hasOneUse()) { 6136239462Sdim SDValue N00 = N0.getOperand(0).getOperand(0); 6137239462Sdim SDValue N01 = N0.getOperand(0).getOperand(1); 6138239462Sdim return DAG.getNode(ISD::FMA, dl, VT, 6139239462Sdim DAG.getNode(ISD::FNEG, dl, VT, N00), N01, 6140239462Sdim DAG.getNode(ISD::FNEG, dl, VT, N1)); 6141239462Sdim } 6142239462Sdim } 6143239462Sdim 6144193323Sed return SDValue(); 6145193323Sed} 6146193323Sed 6147193323SedSDValue DAGCombiner::visitFMUL(SDNode *N) { 6148193323Sed SDValue N0 = N->getOperand(0); 6149193323Sed SDValue N1 = N->getOperand(1); 6150193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6151193323Sed ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 6152198090Srdivacky EVT VT = N->getValueType(0); 6153234353Sdim const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6154193323Sed 6155193323Sed // fold vector ops 6156193323Sed if (VT.isVector()) { 6157193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 6158193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 6159193323Sed } 6160193323Sed 6161193323Sed // fold (fmul c1, c2) -> c1*c2 6162243830Sdim if (N0CFP && N1CFP) 6163193323Sed return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1); 6164193323Sed // canonicalize constant to RHS 6165193323Sed if (N0CFP && !N1CFP) 6166193323Sed return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0); 6167193323Sed // fold (fmul A, 0) -> 0 6168234353Sdim if (DAG.getTarget().Options.UnsafeFPMath && 6169234353Sdim N1CFP && 
N1CFP->getValueAPF().isZero()) 6170193323Sed return N1; 6171193574Sed // fold (fmul A, 0) -> 0, vector edition. 6172234353Sdim if (DAG.getTarget().Options.UnsafeFPMath && 6173234353Sdim ISD::isBuildVectorAllZeros(N1.getNode())) 6174193574Sed return N1; 6175239462Sdim // fold (fmul A, 1.0) -> A 6176239462Sdim if (N1CFP && N1CFP->isExactlyValue(1.0)) 6177239462Sdim return N0; 6178193323Sed // fold (fmul X, 2.0) -> (fadd X, X) 6179193323Sed if (N1CFP && N1CFP->isExactlyValue(+2.0)) 6180193323Sed return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0); 6181198090Srdivacky // fold (fmul X, -1.0) -> (fneg X) 6182193323Sed if (N1CFP && N1CFP->isExactlyValue(-1.0)) 6183193323Sed if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 6184193323Sed return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0); 6185193323Sed 6186193323Sed // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y) 6187234353Sdim if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, 6188234353Sdim &DAG.getTarget().Options)) { 6189234353Sdim if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, 6190234353Sdim &DAG.getTarget().Options)) { 6191193323Sed // Both can be negated for free, check to see if at least one is cheaper 6192193323Sed // negated. 
6193193323Sed if (LHSNeg == 2 || RHSNeg == 2) 6194193323Sed return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 6195193323Sed GetNegatedExpression(N0, DAG, LegalOperations), 6196193323Sed GetNegatedExpression(N1, DAG, LegalOperations)); 6197193323Sed } 6198193323Sed } 6199193323Sed 6200193323Sed // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2)) 6201234353Sdim if (DAG.getTarget().Options.UnsafeFPMath && 6202234353Sdim N1CFP && N0.getOpcode() == ISD::FMUL && 6203193323Sed N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1))) 6204193323Sed return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0), 6205193323Sed DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 6206193323Sed N0.getOperand(1), N1)); 6207193323Sed 6208193323Sed return SDValue(); 6209193323Sed} 6210193323Sed 6211239462SdimSDValue DAGCombiner::visitFMA(SDNode *N) { 6212239462Sdim SDValue N0 = N->getOperand(0); 6213239462Sdim SDValue N1 = N->getOperand(1); 6214239462Sdim SDValue N2 = N->getOperand(2); 6215239462Sdim ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6216239462Sdim ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 6217239462Sdim EVT VT = N->getValueType(0); 6218243830Sdim DebugLoc dl = N->getDebugLoc(); 6219239462Sdim 6220243830Sdim if (DAG.getTarget().Options.UnsafeFPMath) { 6221243830Sdim if (N0CFP && N0CFP->isZero()) 6222243830Sdim return N2; 6223243830Sdim if (N1CFP && N1CFP->isZero()) 6224243830Sdim return N2; 6225243830Sdim } 6226239462Sdim if (N0CFP && N0CFP->isExactlyValue(1.0)) 6227239462Sdim return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N2); 6228239462Sdim if (N1CFP && N1CFP->isExactlyValue(1.0)) 6229239462Sdim return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N2); 6230239462Sdim 6231239462Sdim // Canonicalize (fma c, x, y) -> (fma x, c, y) 6232239462Sdim if (N0CFP && !N1CFP) 6233239462Sdim return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, N1, N0, N2); 6234239462Sdim 6235243830Sdim // (fma x, c1, (fmul x, 
c2)) -> (fmul x, c1+c2) 6236243830Sdim if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && 6237243830Sdim N2.getOpcode() == ISD::FMUL && 6238243830Sdim N0 == N2.getOperand(0) && 6239243830Sdim N2.getOperand(1).getOpcode() == ISD::ConstantFP) { 6240243830Sdim return DAG.getNode(ISD::FMUL, dl, VT, N0, 6241243830Sdim DAG.getNode(ISD::FADD, dl, VT, N1, N2.getOperand(1))); 6242243830Sdim } 6243243830Sdim 6244243830Sdim 6245243830Sdim // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) 6246243830Sdim if (DAG.getTarget().Options.UnsafeFPMath && 6247243830Sdim N0.getOpcode() == ISD::FMUL && N1CFP && 6248243830Sdim N0.getOperand(1).getOpcode() == ISD::ConstantFP) { 6249243830Sdim return DAG.getNode(ISD::FMA, dl, VT, 6250243830Sdim N0.getOperand(0), 6251243830Sdim DAG.getNode(ISD::FMUL, dl, VT, N1, N0.getOperand(1)), 6252243830Sdim N2); 6253243830Sdim } 6254243830Sdim 6255243830Sdim // (fma x, 1, y) -> (fadd x, y) 6256243830Sdim // (fma x, -1, y) -> (fadd (fneg x), y) 6257243830Sdim if (N1CFP) { 6258243830Sdim if (N1CFP->isExactlyValue(1.0)) 6259243830Sdim return DAG.getNode(ISD::FADD, dl, VT, N0, N2); 6260243830Sdim 6261243830Sdim if (N1CFP->isExactlyValue(-1.0) && 6262243830Sdim (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) { 6263243830Sdim SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0); 6264243830Sdim AddToWorkList(RHSNeg.getNode()); 6265243830Sdim return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg); 6266243830Sdim } 6267243830Sdim } 6268243830Sdim 6269243830Sdim // (fma x, c, x) -> (fmul x, (c+1)) 6270243830Sdim if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && N0 == N2) { 6271243830Sdim return DAG.getNode(ISD::FMUL, dl, VT, 6272243830Sdim N0, 6273243830Sdim DAG.getNode(ISD::FADD, dl, VT, 6274243830Sdim N1, DAG.getConstantFP(1.0, VT))); 6275243830Sdim } 6276243830Sdim 6277243830Sdim // (fma x, c, (fneg x)) -> (fmul x, (c-1)) 6278243830Sdim if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && 6279243830Sdim N2.getOpcode() == ISD::FNEG && N2.getOperand(0) 
== N0) { 6280243830Sdim return DAG.getNode(ISD::FMUL, dl, VT, 6281243830Sdim N0, 6282243830Sdim DAG.getNode(ISD::FADD, dl, VT, 6283243830Sdim N1, DAG.getConstantFP(-1.0, VT))); 6284243830Sdim } 6285243830Sdim 6286243830Sdim 6287239462Sdim return SDValue(); 6288239462Sdim} 6289239462Sdim 6290193323SedSDValue DAGCombiner::visitFDIV(SDNode *N) { 6291193323Sed SDValue N0 = N->getOperand(0); 6292193323Sed SDValue N1 = N->getOperand(1); 6293193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6294193323Sed ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 6295198090Srdivacky EVT VT = N->getValueType(0); 6296234353Sdim const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6297193323Sed 6298193323Sed // fold vector ops 6299193323Sed if (VT.isVector()) { 6300193323Sed SDValue FoldedVOp = SimplifyVBinOp(N); 6301193323Sed if (FoldedVOp.getNode()) return FoldedVOp; 6302193323Sed } 6303193323Sed 6304193323Sed // fold (fdiv c1, c2) -> c1/c2 6305243830Sdim if (N0CFP && N1CFP) 6306193323Sed return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1); 6307193323Sed 6308234353Sdim // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable. 6309243830Sdim if (N1CFP && DAG.getTarget().Options.UnsafeFPMath) { 6310234353Sdim // Compute the reciprocal 1.0 / c2. 6311234353Sdim APFloat N1APF = N1CFP->getValueAPF(); 6312234353Sdim APFloat Recip(N1APF.getSemantics(), 1); // 1.0 6313234353Sdim APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven); 6314234353Sdim // Only do the transform if the reciprocal is a legal fp immediate that 6315234353Sdim // isn't too nasty (eg NaN, denormal, ...). 6316234353Sdim if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty 6317234353Sdim (!LegalOperations || 6318234353Sdim // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM 6319234353Sdim // backend)... we should handle this gracefully after Legalize. 
6320234353Sdim // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) || 6321234353Sdim TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) || 6322234353Sdim TLI.isFPImmLegal(Recip, VT))) 6323234353Sdim return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, 6324234353Sdim DAG.getConstantFP(Recip, VT)); 6325234353Sdim } 6326193323Sed 6327193323Sed // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y) 6328234353Sdim if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, 6329234353Sdim &DAG.getTarget().Options)) { 6330234353Sdim if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, 6331234353Sdim &DAG.getTarget().Options)) { 6332193323Sed // Both can be negated for free, check to see if at least one is cheaper 6333193323Sed // negated. 6334193323Sed if (LHSNeg == 2 || RHSNeg == 2) 6335193323Sed return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, 6336193323Sed GetNegatedExpression(N0, DAG, LegalOperations), 6337193323Sed GetNegatedExpression(N1, DAG, LegalOperations)); 6338193323Sed } 6339193323Sed } 6340193323Sed 6341193323Sed return SDValue(); 6342193323Sed} 6343193323Sed 6344193323SedSDValue DAGCombiner::visitFREM(SDNode *N) { 6345193323Sed SDValue N0 = N->getOperand(0); 6346193323Sed SDValue N1 = N->getOperand(1); 6347193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6348193323Sed ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 6349198090Srdivacky EVT VT = N->getValueType(0); 6350193323Sed 6351193323Sed // fold (frem c1, c2) -> fmod(c1,c2) 6352243830Sdim if (N0CFP && N1CFP) 6353193323Sed return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1); 6354193323Sed 6355193323Sed return SDValue(); 6356193323Sed} 6357193323Sed 6358193323SedSDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { 6359193323Sed SDValue N0 = N->getOperand(0); 6360193323Sed SDValue N1 = N->getOperand(1); 6361193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6362193323Sed ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 
6363198090Srdivacky EVT VT = N->getValueType(0); 6364193323Sed 6365243830Sdim if (N0CFP && N1CFP) // Constant fold 6366193323Sed return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1); 6367193323Sed 6368193323Sed if (N1CFP) { 6369193323Sed const APFloat& V = N1CFP->getValueAPF(); 6370193323Sed // copysign(x, c1) -> fabs(x) iff ispos(c1) 6371193323Sed // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1) 6372193323Sed if (!V.isNegative()) { 6373193323Sed if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT)) 6374193323Sed return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0); 6375193323Sed } else { 6376193323Sed if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 6377193323Sed return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, 6378193323Sed DAG.getNode(ISD::FABS, N0.getDebugLoc(), VT, N0)); 6379193323Sed } 6380193323Sed } 6381193323Sed 6382193323Sed // copysign(fabs(x), y) -> copysign(x, y) 6383193323Sed // copysign(fneg(x), y) -> copysign(x, y) 6384193323Sed // copysign(copysign(x,z), y) -> copysign(x, y) 6385193323Sed if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG || 6386193323Sed N0.getOpcode() == ISD::FCOPYSIGN) 6387193323Sed return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, 6388193323Sed N0.getOperand(0), N1); 6389193323Sed 6390193323Sed // copysign(x, abs(y)) -> abs(x) 6391193323Sed if (N1.getOpcode() == ISD::FABS) 6392193323Sed return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0); 6393193323Sed 6394193323Sed // copysign(x, copysign(y,z)) -> copysign(x, z) 6395193323Sed if (N1.getOpcode() == ISD::FCOPYSIGN) 6396193323Sed return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, 6397193323Sed N0, N1.getOperand(1)); 6398193323Sed 6399193323Sed // copysign(x, fp_extend(y)) -> copysign(x, y) 6400193323Sed // copysign(x, fp_round(y)) -> copysign(x, y) 6401193323Sed if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND) 6402193323Sed return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, 
6403193323Sed N0, N1.getOperand(0)); 6404193323Sed 6405193323Sed return SDValue(); 6406193323Sed} 6407193323Sed 6408193323SedSDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { 6409193323Sed SDValue N0 = N->getOperand(0); 6410193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 6411198090Srdivacky EVT VT = N->getValueType(0); 6412198090Srdivacky EVT OpVT = N0.getValueType(); 6413193323Sed 6414193323Sed // fold (sint_to_fp c1) -> c1fp 6415243830Sdim if (N0C && 6416221345Sdim // ...but only if the target supports immediate floating-point values 6417234353Sdim (!LegalOperations || 6418224145Sdim TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) 6419193323Sed return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0); 6420193323Sed 6421193323Sed // If the input is a legal type, and SINT_TO_FP is not legal on this target, 6422193323Sed // but UINT_TO_FP is legal on this target, try to convert. 6423193323Sed if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) && 6424193323Sed TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) { 6425193323Sed // If the sign bit is known to be zero, we can change this to UINT_TO_FP. 6426193323Sed if (DAG.SignBitIsZero(N0)) 6427193323Sed return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0); 6428193323Sed } 6429193323Sed 6430239462Sdim // The next optimizations are desireable only if SELECT_CC can be lowered. 
6431239462Sdim // Check against MVT::Other for SELECT_CC, which is a workaround for targets 6432239462Sdim // having to say they don't support SELECT_CC on every type the DAG knows 6433239462Sdim // about, since there is no way to mark an opcode illegal at all value types 6434239462Sdim // (See also visitSELECT) 6435239462Sdim if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) { 6436239462Sdim // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc) 6437239462Sdim if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 && 6438239462Sdim !VT.isVector() && 6439239462Sdim (!LegalOperations || 6440239462Sdim TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) { 6441239462Sdim SDValue Ops[] = 6442239462Sdim { N0.getOperand(0), N0.getOperand(1), 6443239462Sdim DAG.getConstantFP(-1.0, VT) , DAG.getConstantFP(0.0, VT), 6444239462Sdim N0.getOperand(2) }; 6445239462Sdim return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5); 6446239462Sdim } 6447239462Sdim 6448239462Sdim // fold (sint_to_fp (zext (setcc x, y, cc))) -> 6449239462Sdim // (select_cc x, y, 1.0, 0.0,, cc) 6450239462Sdim if (N0.getOpcode() == ISD::ZERO_EXTEND && 6451239462Sdim N0.getOperand(0).getOpcode() == ISD::SETCC &&!VT.isVector() && 6452239462Sdim (!LegalOperations || 6453239462Sdim TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) { 6454239462Sdim SDValue Ops[] = 6455239462Sdim { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1), 6456239462Sdim DAG.getConstantFP(1.0, VT) , DAG.getConstantFP(0.0, VT), 6457239462Sdim N0.getOperand(0).getOperand(2) }; 6458239462Sdim return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5); 6459239462Sdim } 6460239462Sdim } 6461239462Sdim 6462193323Sed return SDValue(); 6463193323Sed} 6464193323Sed 6465193323SedSDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) { 6466193323Sed SDValue N0 = N->getOperand(0); 6467193323Sed ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 6468198090Srdivacky EVT VT = 
N->getValueType(0); 6469198090Srdivacky EVT OpVT = N0.getValueType(); 6470193323Sed 6471193323Sed // fold (uint_to_fp c1) -> c1fp 6472243830Sdim if (N0C && 6473221345Sdim // ...but only if the target supports immediate floating-point values 6474234353Sdim (!LegalOperations || 6475224145Sdim TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) 6476193323Sed return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0); 6477193323Sed 6478193323Sed // If the input is a legal type, and UINT_TO_FP is not legal on this target, 6479193323Sed // but SINT_TO_FP is legal on this target, try to convert. 6480193323Sed if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) && 6481193323Sed TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) { 6482193323Sed // If the sign bit is known to be zero, we can change this to SINT_TO_FP. 6483193323Sed if (DAG.SignBitIsZero(N0)) 6484193323Sed return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0); 6485193323Sed } 6486193323Sed 6487239462Sdim // The next optimizations are desireable only if SELECT_CC can be lowered. 
6488239462Sdim // Check against MVT::Other for SELECT_CC, which is a workaround for targets 6489239462Sdim // having to say they don't support SELECT_CC on every type the DAG knows 6490239462Sdim // about, since there is no way to mark an opcode illegal at all value types 6491239462Sdim // (See also visitSELECT) 6492239462Sdim if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) { 6493239462Sdim // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc) 6494239462Sdim 6495239462Sdim if (N0.getOpcode() == ISD::SETCC && !VT.isVector() && 6496239462Sdim (!LegalOperations || 6497239462Sdim TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) { 6498239462Sdim SDValue Ops[] = 6499239462Sdim { N0.getOperand(0), N0.getOperand(1), 6500239462Sdim DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT), 6501239462Sdim N0.getOperand(2) }; 6502239462Sdim return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5); 6503239462Sdim } 6504239462Sdim } 6505239462Sdim 6506193323Sed return SDValue(); 6507193323Sed} 6508193323Sed 6509193323SedSDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { 6510193323Sed SDValue N0 = N->getOperand(0); 6511193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6512198090Srdivacky EVT VT = N->getValueType(0); 6513193323Sed 6514193323Sed // fold (fp_to_sint c1fp) -> c1 6515193323Sed if (N0CFP) 6516193323Sed return DAG.getNode(ISD::FP_TO_SINT, N->getDebugLoc(), VT, N0); 6517193323Sed 6518193323Sed return SDValue(); 6519193323Sed} 6520193323Sed 6521193323SedSDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) { 6522193323Sed SDValue N0 = N->getOperand(0); 6523193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6524198090Srdivacky EVT VT = N->getValueType(0); 6525193323Sed 6526193323Sed // fold (fp_to_uint c1fp) -> c1 6527243830Sdim if (N0CFP) 6528193323Sed return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0); 6529193323Sed 6530193323Sed return SDValue(); 6531193323Sed} 6532193323Sed 
6533193323SedSDValue DAGCombiner::visitFP_ROUND(SDNode *N) { 6534193323Sed SDValue N0 = N->getOperand(0); 6535193323Sed SDValue N1 = N->getOperand(1); 6536193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6537198090Srdivacky EVT VT = N->getValueType(0); 6538193323Sed 6539193323Sed // fold (fp_round c1fp) -> c1fp 6540243830Sdim if (N0CFP) 6541193323Sed return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1); 6542193323Sed 6543193323Sed // fold (fp_round (fp_extend x)) -> x 6544193323Sed if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType()) 6545193323Sed return N0.getOperand(0); 6546193323Sed 6547193323Sed // fold (fp_round (fp_round x)) -> (fp_round x) 6548193323Sed if (N0.getOpcode() == ISD::FP_ROUND) { 6549193323Sed // This is a value preserving truncation if both round's are. 6550193323Sed bool IsTrunc = N->getConstantOperandVal(1) == 1 && 6551193323Sed N0.getNode()->getConstantOperandVal(1) == 1; 6552193323Sed return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0.getOperand(0), 6553193323Sed DAG.getIntPtrConstant(IsTrunc)); 6554193323Sed } 6555193323Sed 6556193323Sed // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y) 6557193323Sed if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) { 6558193323Sed SDValue Tmp = DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(), VT, 6559193323Sed N0.getOperand(0), N1); 6560193323Sed AddToWorkList(Tmp.getNode()); 6561193323Sed return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, 6562193323Sed Tmp, N0.getOperand(1)); 6563193323Sed } 6564193323Sed 6565193323Sed return SDValue(); 6566193323Sed} 6567193323Sed 6568193323SedSDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { 6569193323Sed SDValue N0 = N->getOperand(0); 6570198090Srdivacky EVT VT = N->getValueType(0); 6571198090Srdivacky EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 6572193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6573193323Sed 6574193323Sed // fold 
(fp_round_inreg c1fp) -> c1fp 6575207618Srdivacky if (N0CFP && isTypeLegal(EVT)) { 6576193323Sed SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT); 6577193323Sed return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round); 6578193323Sed } 6579193323Sed 6580193323Sed return SDValue(); 6581193323Sed} 6582193323Sed 6583193323SedSDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { 6584193323Sed SDValue N0 = N->getOperand(0); 6585193323Sed ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6586198090Srdivacky EVT VT = N->getValueType(0); 6587193323Sed 6588193323Sed // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. 6589193323Sed if (N->hasOneUse() && 6590193323Sed N->use_begin()->getOpcode() == ISD::FP_ROUND) 6591193323Sed return SDValue(); 6592193323Sed 6593193323Sed // fold (fp_extend c1fp) -> c1fp 6594243830Sdim if (N0CFP) 6595193323Sed return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0); 6596193323Sed 6597193323Sed // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the 6598193323Sed // value of X. 
6599193323Sed if (N0.getOpcode() == ISD::FP_ROUND 6600193323Sed && N0.getNode()->getConstantOperandVal(1) == 1) { 6601193323Sed SDValue In = N0.getOperand(0); 6602193323Sed if (In.getValueType() == VT) return In; 6603193323Sed if (VT.bitsLT(In.getValueType())) 6604193323Sed return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, 6605193323Sed In, N0.getOperand(1)); 6606193323Sed return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, In); 6607193323Sed } 6608193323Sed 6609193323Sed // fold (fpext (load x)) -> (fpext (fptrunc (extload x))) 6610193323Sed if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() && 6611193323Sed ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 6612193323Sed TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) { 6613193323Sed LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6614218893Sdim SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT, 6615193323Sed LN0->getChain(), 6616218893Sdim LN0->getBasePtr(), LN0->getPointerInfo(), 6617193323Sed N0.getValueType(), 6618203954Srdivacky LN0->isVolatile(), LN0->isNonTemporal(), 6619203954Srdivacky LN0->getAlignment()); 6620193323Sed CombineTo(N, ExtLoad); 6621193323Sed CombineTo(N0.getNode(), 6622193323Sed DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(), 6623193323Sed N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)), 6624193323Sed ExtLoad.getValue(1)); 6625193323Sed return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6626193323Sed } 6627193323Sed 6628193323Sed return SDValue(); 6629193323Sed} 6630193323Sed 6631193323SedSDValue DAGCombiner::visitFNEG(SDNode *N) { 6632193323Sed SDValue N0 = N->getOperand(0); 6633198396Srdivacky EVT VT = N->getValueType(0); 6634193323Sed 6635243830Sdim if (VT.isVector()) { 6636243830Sdim SDValue FoldedVOp = SimplifyVUnaryOp(N); 6637243830Sdim if (FoldedVOp.getNode()) return FoldedVOp; 6638243830Sdim } 6639243830Sdim 6640234353Sdim if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(), 6641234353Sdim &DAG.getTarget().Options)) 6642193323Sed return GetNegatedExpression(N0, DAG, LegalOperations); 6643193323Sed 6644193323Sed // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading 6645193323Sed // constant pool values. 6646234353Sdim if (!TLI.isFNegFree(VT) && N0.getOpcode() == ISD::BITCAST && 6647198396Srdivacky !VT.isVector() && 6648198396Srdivacky N0.getNode()->hasOneUse() && 6649198396Srdivacky N0.getOperand(0).getValueType().isInteger()) { 6650193323Sed SDValue Int = N0.getOperand(0); 6651198090Srdivacky EVT IntVT = Int.getValueType(); 6652193323Sed if (IntVT.isInteger() && !IntVT.isVector()) { 6653193323Sed Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int, 6654193323Sed DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT)); 6655193323Sed AddToWorkList(Int.getNode()); 6656218893Sdim return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 6657198396Srdivacky VT, Int); 6658193323Sed } 6659193323Sed } 6660193323Sed 6661243830Sdim // (fneg (fmul c, x)) -> (fmul -c, x) 6662243830Sdim if (N0.getOpcode() == ISD::FMUL) { 6663243830Sdim ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 6664243830Sdim if (CFP1) { 6665243830Sdim return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 6666243830Sdim N0.getOperand(0), 6667243830Sdim DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, 6668243830Sdim N0.getOperand(1))); 6669243830Sdim } 6670243830Sdim } 6671243830Sdim 6672193323Sed return SDValue(); 
}

/// visitFCEIL - Combine an FCEIL node.  Rebuilding the node when the operand
/// is a floating-point constant lets SelectionDAG::getNode constant-fold it.
SDValue DAGCombiner::visitFCEIL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fceil c1) -> fceil(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FCEIL, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFTRUNC - Combine an FTRUNC node; constant operands are folded by
/// re-emitting the node through getNode.
SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (ftrunc c1) -> ftrunc(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FTRUNC, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFFLOOR - Combine an FFLOOR node; constant operands are folded by
/// re-emitting the node through getNode.
SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (ffloor c1) -> ffloor(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FFLOOR, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFABS - Combine an FABS node: constant folding, idempotence
/// (fabs(fabs x)), sign-stripping of fneg/fcopysign operands, and an
/// integer-domain rewrite of fabs(bitcast x) when fabs is not free.
SDValue DAGCombiner::visitFABS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // Vector constant operands are handled by the generic unary-op folder.
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVUnaryOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fabs c1) -> fabs(c1)
  if (N0CFP)
    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
  // fold (fabs (fabs x)) -> (fabs x)
  if (N0.getOpcode() == ISD::FABS)
    return N->getOperand(0);
  // fold (fabs (fneg x)) -> (fabs x)
  // fold (fabs (fcopysign x, y)) -> (fabs x)
  // In both cases the inner node only affects the sign, which fabs discards.
  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0.getOperand(0));

  // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
  // constant pool values.  Only worthwhile when the target has no free fabs,
  // and only for scalar integer payloads.
  if (!TLI.isFAbsFree(VT) &&
      N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
      N0.getOperand(0).getValueType().isInteger() &&
      !N0.getOperand(0).getValueType().isVector()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      // Clear the sign bit in the integer domain, then bitcast back.
      Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
             DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
      AddToWorkList(Int.getNode());
      return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
                         N->getValueType(0), Int);
    }
  }

  return SDValue();
}

/// visitBRCOND - Combine a conditional branch: fold setcc conditions into
/// BR_CC, simplify (and;srl) single-bit tests into a setcc-against-zero,
/// and turn branches on xor into branches on setcc.
SDValue DAGCombiner::visitBRCOND(SDNode *N) {
  SDValue Chain = N->getOperand(0);
  SDValue N1 = N->getOperand(1);    // The branch condition.
  SDValue N2 = N->getOperand(2);    // The destination basic block.

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
  // on the target.
  if (N1.getOpcode() == ISD::SETCC &&
      TLI.isOperationLegalOrCustom(ISD::BR_CC,
                                   N1.getOperand(0).getValueType())) {
    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
                       Chain, N1.getOperand(2),
                       N1.getOperand(0), N1.getOperand(1), N2);
  }

  // Accept either (srl ...) directly or (truncate (srl ...)) with single uses.
  if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
      ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
       (N1.getOperand(0).hasOneUse() &&
        N1.getOperand(0).getOpcode() == ISD::SRL))) {
    SDNode *Trunc = 0;
    if (N1.getOpcode() == ISD::TRUNCATE) {
      // Look past the truncate.
      Trunc = N1.getNode();
      N1 = N1.getOperand(0);
    }

    // Match this pattern so that we can generate simpler code:
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = srl i32 %b, 1
    //   brcond i32 %c ...
    //
    // into
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = setcc eq %b, 0
    //   brcond %c ...
    //
    // This applies only when the AND constant value has one bit set and the
    // SRL constant is equal to the log2 of the AND constant. The back-end is
    // smart enough to convert the result into a TEST/JMP sequence.
    SDValue Op0 = N1.getOperand(0);
    SDValue Op1 = N1.getOperand(1);

    if (Op0.getOpcode() == ISD::AND &&
        Op1.getOpcode() == ISD::Constant) {
      SDValue AndOp1 = Op0.getOperand(1);

      if (AndOp1.getOpcode() == ISD::Constant) {
        const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();

        if (AndConst.isPowerOf2() &&
            cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
          SDValue SetCC =
            DAG.getSetCC(N->getDebugLoc(),
                         TLI.getSetCCResultType(Op0.getValueType()),
                         Op0, DAG.getConstant(0, Op0.getValueType()),
                         ISD::SETNE);

          SDValue NewBRCond = DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                                          MVT::Other, Chain, SetCC, N2);
          // Don't add the new BRCond into the worklist or else SimplifySelectCC
          // will convert it back to (X & C1) >> C2.
          CombineTo(N, NewBRCond, false);
          // Truncate is dead.
          if (Trunc) {
            removeFromWorkList(Trunc);
            DAG.DeleteNode(Trunc);
          }
          // Replace the uses of SRL with SETCC
          WorkListRemover DeadNodes(*this);
          DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
          removeFromWorkList(N1.getNode());
          DAG.DeleteNode(N1.getNode());
          return SDValue(N, 0);   // Return N so it doesn't get rechecked!
        }
      }
    }

    if (Trunc)
      // Restore N1 if the above transformation doesn't match.
      N1 = N->getOperand(1);
  }

  // Transform br(xor(x, y)) -> br(x != y)
  // Transform br(xor(xor(x,y), 1)) -> br (x == y)
  if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
    SDNode *TheXor = N1.getNode();
    SDValue Op0 = TheXor->getOperand(0);
    SDValue Op1 = TheXor->getOperand(1);
    if (Op0.getOpcode() == Op1.getOpcode()) {
      // Avoid missing important xor optimizations.
      SDValue Tmp = visitXOR(TheXor);
      if (Tmp.getNode()) {
        if (Tmp.getNode() != TheXor) {
          DEBUG(dbgs() << "\nReplacing.8 ";
                TheXor->dump(&DAG);
                dbgs() << "\nWith: ";
                Tmp.getNode()->dump(&DAG);
                dbgs() << '\n');
          WorkListRemover DeadNodes(*this);
          DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
          removeFromWorkList(TheXor);
          DAG.DeleteNode(TheXor);
          return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                             MVT::Other, Chain, Tmp, N2);
        }

        // visitXOR has changed XOR's operands or replaced the XOR completely,
        // bail out.
        return SDValue(N, 0);
      }
    }

    if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
      bool Equal = false;
      // NOTE(review): this check looks unreachable — a node that dyn_casts to
      // ConstantSDNode has opcode ISD::Constant, never ISD::XOR.  Intent was
      // presumably to detect (xor (xor x, y), 1); confirm before relying on
      // the Equal=true path.
      if (ConstantSDNode *RHSCI = dyn_cast<ConstantSDNode>(Op0))
        if (RHSCI->getAPIntValue() == 1 && Op0.hasOneUse() &&
            Op0.getOpcode() == ISD::XOR) {
          TheXor = Op0.getNode();
          Equal = true;
        }

      EVT SetCCVT = N1.getValueType();
      if (LegalTypes)
        SetCCVT = TLI.getSetCCResultType(SetCCVT);
      SDValue SetCC = DAG.getSetCC(TheXor->getDebugLoc(),
                                   SetCCVT,
                                   Op0, Op1,
                                   Equal ? ISD::SETEQ : ISD::SETNE);
      // Replace the uses of XOR with SETCC
      WorkListRemover DeadNodes(*this);
      DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
      removeFromWorkList(N1.getNode());
      DAG.DeleteNode(N1.getNode());
      return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                         MVT::Other, Chain, SetCC, N2);
    }
  }

  return SDValue();
}

// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
//
SDValue DAGCombiner::visitBR_CC(SDNode *N) {
  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // Use SimplifySetCC to simplify SETCC's.
  SDValue Simp = SimplifySetCC(TLI.getSetCCResultType(CondLHS.getValueType()),
                               CondLHS, CondRHS, CC->get(), N->getDebugLoc(),
                               false);
  if (Simp.getNode()) AddToWorkList(Simp.getNode());

  // fold to a simpler setcc
  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
                       N->getOperand(0), Simp.getOperand(2),
                       Simp.getOperand(0), Simp.getOperand(1),
                       N->getOperand(4));

  return SDValue();
}

/// canFoldInAddressingMode - Return true if 'Use' is a load or a store that
/// uses N as its base pointer and that N may be folded in the load / store
/// addressing mode.
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
                                    SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  // Only unindexed loads/stores whose base pointer is exactly N qualify.
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
    if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
      return false;
    VT = Use->getValueType(0);
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
    if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
      return false;
    VT = ST->getValue().getValueType();
  } else
    return false;

  // Translate the ADD/SUB base-pointer expression into a target addressing
  // mode query: constant RHS becomes an immediate offset, otherwise a
  // register offset with scale 1.
  TargetLowering::AddrMode AM;
  if (N->getOpcode() == ISD::ADD) {
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else if (N->getOpcode() == ISD::SUB) {
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = -Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else
    return false;

  return TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()));
}

/// CombineToPreIndexedLoadStore - Try turning a load / store into a
/// pre-indexed load / store when the base pointer is an add or subtract
/// and it has other uses besides the load / store. After the
/// transformation, the new indexed load / store has effectively folded
/// the add / subtract in and all of its other uses are redirected to the
/// new load / store.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  // Gather the memory VT and base pointer, and check the target supports a
  // pre-inc/pre-dec form for this operation.
  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
  // out.  There is no reason to make this a preinc/predec.
  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
      Ptr.getNode()->hasOneUse())
    return false;

  // Ask the target to do addressing mode selection.
  SDValue BasePtr;
  SDValue Offset;
  ISD::MemIndexedMode AM = ISD::UNINDEXED;
  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
    return false;

  // Backends without true r+i pre-indexed forms may need to pass a
  // constant base with a variable offset so that constant coercion
  // will work with the patterns in canonical form.
  bool Swapped = false;
  if (isa<ConstantSDNode>(BasePtr)) {
    std::swap(BasePtr, Offset);
    Swapped = true;
  }

  // Don't create an indexed load / store with zero offset.
  if (isa<ConstantSDNode>(Offset) &&
      cast<ConstantSDNode>(Offset)->isNullValue())
    return false;

  // Try turning it into a pre-indexed load / store except when:
  // 1) The new base ptr is a frame index.
  // 2) If N is a store and the new base ptr is either the same as or is a
  //    predecessor of the value being stored.
  // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
  //    that would create a cycle.
  // 4) All uses are load / store ops that use it as old base ptr.

  // Check #1.  Preinc'ing a frame index would require copying the stack pointer
  // (plus the implicit offset) to a register to preinc anyway.
  if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
    return false;

  // Check #2.
  if (!isLoad) {
    SDValue Val = cast<StoreSDNode>(N)->getValue();
    if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
      return false;
  }

  // If the offset is a constant, there may be other adds of constants that
  // can be folded with this one. We should do this to avoid having to keep
  // a copy of the original base pointer.
  // Collect such uses now; any use that can't be rewritten clears the list
  // and aborts the collection.
  SmallVector<SDNode *, 16> OtherUses;
  if (isa<ConstantSDNode>(Offset))
    for (SDNode::use_iterator I = BasePtr.getNode()->use_begin(),
         E = BasePtr.getNode()->use_end(); I != E; ++I) {
      SDNode *Use = *I;
      if (Use == Ptr.getNode())
        continue;

      // Rewriting a predecessor of N would create a cycle.
      if (Use->isPredecessorOf(N))
        continue;

      if (Use->getOpcode() != ISD::ADD && Use->getOpcode() != ISD::SUB) {
        OtherUses.clear();
        break;
      }

      SDValue Op0 = Use->getOperand(0), Op1 = Use->getOperand(1);
      if (Op1.getNode() == BasePtr.getNode())
        std::swap(Op0, Op1);
      assert(Op0.getNode() == BasePtr.getNode() &&
             "Use of ADD/SUB but not an operand");

      if (!isa<ConstantSDNode>(Op1)) {
        OtherUses.clear();
        break;
      }

      // FIXME: In some cases, we can be smarter about this.
      if (Op1.getValueType() != Offset.getValueType()) {
        OtherUses.clear();
        break;
      }

      OtherUses.push_back(Use);
    }

  // Undo the earlier constant-base swap before the profitability checks.
  if (Swapped)
    std::swap(BasePtr, Offset);

  // Now check for #3 and #4.
  bool RealUse = false;

  // Caches for hasPredecessorHelper
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;

  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
         E = Ptr.getNode()->use_end(); I != E; ++I) {
    SDNode *Use = *I;
    if (Use == N)
      continue;
    if (N->hasPredecessorHelper(Use, Visited, Worklist))
      return false;

    // If Ptr may be folded in addressing mode of other use, then it's
    // not profitable to do this transformation.
    if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
      RealUse = true;
  }

  if (!RealUse)
    return false;

  SDValue Result;
  if (isLoad)
    Result = DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
                                BasePtr, Offset, AM);
  else
    Result = DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
                                 BasePtr, Offset, AM);
  ++PreIndexedNodes;
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.4 ";
        N->dump(&DAG);
        dbgs() << "\nWith: ";
        Result.getNode()->dump(&DAG);
        dbgs() << '\n');
  WorkListRemover DeadNodes(*this);
  if (isLoad) {
    // Indexed load results: 0 = value, 1 = new base, 2 = chain.
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
  } else {
    // Indexed store results: 0 = new base, 1 = chain.
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
  }

  // Finally, since the node is now dead, remove it from the graph.
  DAG.DeleteNode(N);

  if (Swapped)
    std::swap(BasePtr, Offset);

  // Replace other uses of BasePtr that can be updated to use Ptr
  for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) {
    unsigned OffsetIdx = 1;
    if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode())
      OffsetIdx = 0;
    assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() ==
           BasePtr.getNode() && "Expected BasePtr operand");

    // We need to replace ptr0 in the following expression:
    //   x0 * offset0 + y0 * ptr0 = t0
    // knowing that
    //   x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store)
    //
    // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the
    // indexed load/store and the expression that needs to be re-written.
    //
    // Therefore, we have:
    //   t0 = (x0 * offset0 - x1 * y0 * y1 *offset1) + (y0 * y1) * t1

    ConstantSDNode *CN =
      cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx));
    int X0, X1, Y0, Y1;
    APInt Offset0 = CN->getAPIntValue();
    APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue();

    // Signs of the four terms, derived from SUB vs ADD and operand position.
    X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1;
    Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1;
    X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1;
    Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1;

    unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD;

    APInt CNV = Offset0;
    if (X0 < 0) CNV = -CNV;
    if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1;
    else CNV = CNV - Offset1;

    // We can now generate the new expression.
    SDValue NewOp1 = DAG.getConstant(CNV, CN->getValueType(0));
    SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0);

    SDValue NewUse = DAG.getNode(Opcode,
                                 OtherUses[i]->getDebugLoc(),
                                 OtherUses[i]->getValueType(0), NewOp1, NewOp2);
    DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse);
    removeFromWorkList(OtherUses[i]);
    DAG.DeleteNode(OtherUses[i]);
  }

  // Replace the uses of Ptr with uses of the updated base value.
  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
  removeFromWorkList(Ptr.getNode());
  DAG.DeleteNode(Ptr.getNode());

  return true;
}

/// CombineToPostIndexedLoadStore - Try to combine a load / store with a
/// add / sub of the base pointer node into a post-indexed load / store.
/// The transformation folded the add / subtract into the new indexed
/// load / store effectively and all of its uses are redirected to the
/// new load / store.
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  // Gather the memory VT and base pointer, and check the target supports a
  // post-inc/post-dec form for this operation.
  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  // With only one use there is no add/sub of the base pointer to fold.
  if (Ptr.getNode()->hasOneUse())
    return false;

  // Look for an ADD/SUB user of the base pointer that the target can fold
  // as a post-indexed offset.
  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
         E = Ptr.getNode()->use_end(); I != E; ++I) {
    SDNode *Op = *I;
    if (Op == N ||
        (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
      continue;

    SDValue BasePtr;
    SDValue Offset;
    ISD::MemIndexedMode AM = ISD::UNINDEXED;
    if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
      // Don't create an indexed load / store with zero offset.
      if (isa<ConstantSDNode>(Offset) &&
          cast<ConstantSDNode>(Offset)->isNullValue())
        continue;

      // Try turning it into a post-indexed load / store except when
      // 1) All uses are load / store ops that use it as base ptr (and
      //    it may be folded as addressing mode).
      // 2) Op must be independent of N, i.e. Op is neither a predecessor
      //    nor a successor of N. Otherwise, if Op is folded that would
      //    create a cycle.

      if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
        continue;

      // Check for #1.
      bool TryNext = false;
      for (SDNode::use_iterator II = BasePtr.getNode()->use_begin(),
             EE = BasePtr.getNode()->use_end(); II != EE; ++II) {
        SDNode *Use = *II;
        if (Use == Ptr.getNode())
          continue;

        // If all the uses are load / store addresses, then don't do the
        // transformation.
        if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){
          bool RealUse = false;
          for (SDNode::use_iterator III = Use->use_begin(),
                 EEE = Use->use_end(); III != EEE; ++III) {
            SDNode *UseUse = *III;
            if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
              RealUse = true;
          }

          if (!RealUse) {
            TryNext = true;
            break;
          }
        }
      }

      if (TryNext)
        continue;

      // Check for #2
      if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
        SDValue Result = isLoad
          ? DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
                               BasePtr, Offset, AM)
          : DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
                                BasePtr, Offset, AM);
        ++PostIndexedNodes;
        ++NodesCombined;
        DEBUG(dbgs() << "\nReplacing.5 ";
              N->dump(&DAG);
              dbgs() << "\nWith: ";
              Result.getNode()->dump(&DAG);
              dbgs() << '\n');
        WorkListRemover DeadNodes(*this);
        if (isLoad) {
          // Indexed load results: 0 = value, 1 = new base, 2 = chain.
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
        } else {
          // Indexed store results: 0 = new base, 1 = chain.
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
        }

        // Finally, since the node is now dead, remove it from the graph.
        DAG.DeleteNode(N);

        // Replace the uses of Use with uses of the updated base value.
        DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
                                      Result.getValue(isLoad ? 1 : 0));
        removeFromWorkList(Op);
        DAG.DeleteNode(Op);
        return true;
      }
    }
  }

  return false;
}

/// visitLOAD - Combine a LOAD node: delete dead loads, forward stored values,
/// improve alignment, re-chain via alias analysis, and form indexed loads.
SDValue DAGCombiner::visitLOAD(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();

  // If load is not volatile and there are no uses of the loaded value (and
  // the updated indexed value in case of indexed loads), change uses of the
  // chain value into uses of the chain input (i.e. delete the dead load).
  if (!LD->isVolatile()) {
    if (N->getValueType(1) == MVT::Other) {
      // Unindexed loads.
      if (!N->hasAnyUseOfValue(0)) {
        // It's not safe to use the two value CombineTo variant here. e.g.
        // v1, chain2 = load chain1, loc
        // v2, chain3 = load chain2, loc
        // v3         = add v2, c
        // Now we replace use of chain2 with chain1.  This makes the second load
        // isomorphic to the one we are deleting, and thus makes this load live.
        DEBUG(dbgs() << "\nReplacing.6 ";
              N->dump(&DAG);
              dbgs() << "\nWith chain: ";
              Chain.getNode()->dump(&DAG);
              dbgs() << "\n");
        WorkListRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);

        if (N->use_empty()) {
          removeFromWorkList(N);
          DAG.DeleteNode(N);
        }

        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    } else {
      // Indexed loads.
      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
      if (!N->hasAnyUseOfValue(0) && !N->hasAnyUseOfValue(1)) {
        // Both the value and the updated base are dead: replace them with
        // undef and redirect the chain.
        SDValue Undef = DAG.getUNDEF(N->getValueType(0));
        DEBUG(dbgs() << "\nReplacing.7 ";
              N->dump(&DAG);
              dbgs() << "\nWith: ";
              Undef.getNode()->dump(&DAG);
              dbgs() << " and 2 other values\n");
        WorkListRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
                                      DAG.getUNDEF(N->getValueType(1)));
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
        removeFromWorkList(N);
        DAG.DeleteNode(N);
        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    }
  }

  // If this load is directly stored, replace the load value with the stored
  // value.
  // TODO: Handle store large -> read small portion.
  // TODO: Handle TRUNCSTORE/LOADEXT
  if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
      if (PrevST->getBasePtr() == Ptr &&
          PrevST->getValue().getValueType() == N->getValueType(0))
        return CombineTo(N, Chain.getOperand(1), Chain);
    }
  }

  // Try to infer better alignment information than the load already has.
  if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > LD->getMemOperand()->getBaseAlignment()) {
        SDValue NewLoad =
               DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
                              LD->getValueType(0),
                              Chain, Ptr, LD->getPointerInfo(),
                              LD->getMemoryVT(),
                              LD->isVolatile(), LD->isNonTemporal(), Align);
        return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true);
      }
    }
  }

  if (CombinerAA) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDValue BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDValue ReplLoad;

      // Replace the chain to avoid a dependency.
      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
        ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
                               BetterChain, Ptr, LD->getPointerInfo(),
                               LD->isVolatile(), LD->isNonTemporal(),
                               LD->isInvariant(), LD->getAlignment());
      } else {
        ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
                                  LD->getValueType(0),
                                  BetterChain, Ptr, LD->getPointerInfo(),
                                  LD->getMemoryVT(),
                                  LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  LD->getAlignment());
      }

      // Create token factor to keep old chain connected.
      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
                                  MVT::Other, Chain, ReplLoad.getValue(1));

      // Make sure the new and old chains are cleaned up.
      AddToWorkList(Token.getNode());

      // Replace uses with load result and token factor. Don't add users
      // to work list.
      return CombineTo(N, ReplLoad.getValue(0), Token, false);
    }
  }

  // Try transforming N to an indexed load.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  return SDValue();
}

/// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
/// load is having specific bytes cleared out.  If so, return the byte size
/// being masked out and the shift amount.
/// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
/// load is having specific bytes cleared out.  If so, return the byte size
/// being masked out and the shift amount.
///
/// \param V     the stored value; must be (and (load Ptr), constant).
/// \param Ptr   the base pointer of the store being narrowed; the load must
///              read from the same pointer.
/// \param Chain the store's chain; the load must feed it directly or be an
///              operand of a TokenFactor on it.
/// \returns {bytes masked out, byte shift of the masked region}, or {0, 0}
///          if the pattern does not match.
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
  // {0, 0} is the failure value; a zero first member means "no match".
  std::pair<unsigned, unsigned> Result(0, 0);

  // Check for the structure we're looking for.
  if (V->getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(V->getOperand(1)) ||
      !ISD::isNormalLoad(V->getOperand(0).getNode()))
    return Result;

  // Check the chain and pointer.
  LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
  if (LD->getBasePtr() != Ptr) return Result;  // Not from same pointer.

  // The store should be chained directly to the load or be an operand of a
  // tokenfactor.
  if (LD == Chain.getNode())
    ; // ok.
  else if (Chain->getOpcode() != ISD::TokenFactor)
    return Result; // Fail.
  else {
    // Scan the TokenFactor operands for the load.
    bool isOk = false;
    for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
      if (Chain->getOperand(i).getNode() == LD) {
        isOk = true;
        break;
      }
    if (!isOk) return Result;
  }

  // This only handles simple types.
  if (V.getValueType() != MVT::i16 &&
      V.getValueType() != MVT::i32 &&
      V.getValueType() != MVT::i64)
    return Result;

  // Check the constant mask.  Invert it so that the bits being masked out are
  // 0 and the bits being kept are 1.  Use getSExtValue so that leading bits
  // follow the sign bit for uniformity.
  uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
  unsigned NotMaskLZ = CountLeadingZeros_64(NotMask);
  if (NotMaskLZ & 7) return Result;  // Must be multiple of a byte.
  unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
  if (NotMaskTZ & 7) return Result;  // Must be multiple of a byte.
  if (NotMaskLZ == 64) return Result;  // All zero mask.

  // See if we have a continuous run of bits.  If so, we have 0*1+0*
  if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
    return Result;

  // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
  if (V.getValueType() != MVT::i64 && NotMaskLZ)
    NotMaskLZ -= 64-V.getValueSizeInBits();

  // Width in bytes of the contiguous masked-out region.
  unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
  switch (MaskedBytes) {
  case 1:
  case 2:
  case 4: break;
  default: return Result; // All one mask, or 5-byte mask.
  }

  // Verify that the first bit starts at a multiple of mask so that the access
  // is aligned the same as the access width.
  if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;

  Result.first = MaskedBytes;
  Result.second = NotMaskTZ/8;
  return Result;
}

/// ShrinkLoadReplaceStoreWithStore - Check to see if IVal is something that
/// provides a value as specified by MaskInfo.
/// If so, replace the specified
/// store with a narrower store of truncated IVal.
///
/// \param MaskInfo {number of bytes to keep, byte shift of those bytes}, as
///                 produced by CheckForMaskedLoad.
/// \param IVal     the value providing the bytes being stored; must be known
///                 zero outside the masked-in region.
/// \param St       the store being replaced.
/// \param DC       the DAGCombiner, used for the DAG and legality queries.
/// \returns the new, narrower store node, or null if the replacement is not
///          possible (bytes not known zero, or the narrow type is illegal).
static SDNode *
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
                                SDValue IVal, StoreSDNode *St,
                                DAGCombiner *DC) {
  unsigned NumBytes = MaskInfo.first;
  unsigned ByteShift = MaskInfo.second;
  SelectionDAG &DAG = DC->getDAG();

  // Check to see if IVal is all zeros in the part being masked in by the 'or'
  // that uses this.  If not, this is not a replacement.
  APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
                                  ByteShift*8, (ByteShift+NumBytes)*8);
  if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;

  // Check that it is legal on the target to do this.  It is legal if the new
  // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
  // legalization.
  MVT VT = MVT::getIntegerVT(NumBytes*8);
  if (!DC->isTypeLegal(VT))
    return 0;

  // Okay, we can do this!  Replace the 'St' store with a store of IVal that is
  // shifted by ByteShift and truncated down to NumBytes.
  if (ByteShift)
    IVal = DAG.getNode(ISD::SRL, IVal->getDebugLoc(), IVal.getValueType(), IVal,
                       DAG.getConstant(ByteShift*8,
                                    DC->getShiftAmountTy(IVal.getValueType())));

  // Figure out the offset for the store and the alignment of the access.
  unsigned StOffset;
  unsigned NewAlign = St->getAlignment();

  // On little-endian targets the low bytes sit at the low addresses; on
  // big-endian targets the region must be located from the far end.
  if (DAG.getTargetLoweringInfo().isLittleEndian())
    StOffset = ByteShift;
  else
    StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;

  SDValue Ptr = St->getBasePtr();
  if (StOffset) {
    // Advance the pointer and conservatively shrink the alignment to match.
    Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
                      Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
    NewAlign = MinAlign(NewAlign, StOffset);
  }

  // Truncate down to the new size.
  IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);

  ++OpsNarrowed;
  return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
                      St->getPointerInfo().getWithOffset(StOffset),
                      false, false, NewAlign).getNode();
}


/// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is
/// one of 'or', 'xor', and 'and' of immediates. If 'op' is only touching some
/// of the loaded bits, try narrowing the load and store if it would end up
/// being a win for performance or code size.
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
  // N is the store node; bail out on cases the transform cannot handle.
  StoreSDNode *ST  = cast<StoreSDNode>(N);
  if (ST->isVolatile())
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr   = ST->getBasePtr();
  EVT VT = Value.getValueType();

  // Only plain integer stores whose stored value has no other users.
  if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
    return SDValue();

  unsigned Opc = Value.getOpcode();

  // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
  // is a byte mask indicating a consecutive number of bytes, check to see if
  // Y is known to provide just those bytes.  If so, we try to replace the
  // load + replace + store sequence with a single (narrower) store, which makes
  // the load dead.
  if (Opc == ISD::OR) {
    std::pair<unsigned, unsigned> MaskedLoad;
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
                                                  Value.getOperand(1), ST,this))
        return SDValue(NewST, 0);

    // Or is commutative, so try swapping X and Y.
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
                                                  Value.getOperand(0), ST,this))
        return SDValue(NewST, 0);
  }

  // The remaining transform handles or/xor/and with a constant RHS only.
  if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
      Value.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N0 = Value.getOperand(0);
  // The op's LHS must be a normal load used only here, with the store chained
  // directly to the load's chain result.
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      Chain == SDValue(N0.getNode(), 1)) {
    LoadSDNode *LD = cast<LoadSDNode>(N0);
    // The load must read the very memory the store writes.
    if (LD->getBasePtr() != Ptr ||
        LD->getPointerInfo().getAddrSpace() !=
        ST->getPointerInfo().getAddrSpace())
      return SDValue();

    // Find the type to narrow it the load / op / store to.
    SDValue N1 = Value.getOperand(1);
    unsigned BitWidth = N1.getValueSizeInBits();
    APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
    // For AND, invert the constant so that, as for or/xor, the set bits of
    // Imm are exactly the bits the op actually changes.
    if (Opc == ISD::AND)
      Imm ^= APInt::getAllOnesValue(BitWidth);
    // No bits changed, or all bits changed: nothing to narrow.
    if (Imm == 0 || Imm.isAllOnesValue())
      return SDValue();
    unsigned ShAmt = Imm.countTrailingZeros();
    unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
    unsigned NewBW = NextPowerOf2(MSB - ShAmt);
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    // Grow the candidate type until the op is legal/custom on it and the
    // target says narrowing to it is profitable.
    while (NewBW < BitWidth &&
           !(TLI.isOperationLegalOrCustom(Opc, NewVT) &&
             TLI.isNarrowingProfitable(VT, NewVT))) {
      NewBW = NextPowerOf2(NewBW);
      NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    }
    if (NewBW >= BitWidth)
      return SDValue();

    // If the lsb changed does not start at the type bitwidth boundary,
    // start at the previous one.
    if (ShAmt % NewBW)
      ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
    APInt Mask = APInt::getBitsSet(BitWidth, ShAmt,
                                   std::min(BitWidth, ShAmt + NewBW));
    // Only proceed if every changed bit fits inside the NewBW-wide window.
    if ((Imm & Mask) == Imm) {
      APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
      // Undo the earlier inversion for AND so the narrow op uses the real
      // constant.
      if (Opc == ISD::AND)
        NewImm ^= APInt::getAllOnesValue(NewBW);
      uint64_t PtrOff = ShAmt / 8;
      // For big endian targets, we need to adjust the offset to the pointer to
      // load the correct bytes.
      if (TLI.isBigEndian())
        PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;

      // Give up rather than emit an under-aligned narrow access.
      unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
      Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
      if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
        return SDValue();

      // Build the narrow load / op / store at the byte offset.
      SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
                                   Ptr.getValueType(), Ptr,
                                   DAG.getConstant(PtrOff, Ptr.getValueType()));
      SDValue NewLD = DAG.getLoad(NewVT, N0.getDebugLoc(),
                                  LD->getChain(), NewPtr,
                                  LD->getPointerInfo().getWithOffset(PtrOff),
                                  LD->isVolatile(), LD->isNonTemporal(),
                                  LD->isInvariant(), NewAlign);
      SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
                                   DAG.getConstant(NewImm, NewVT));
      SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
                                   NewVal, NewPtr,
                                   ST->getPointerInfo().getWithOffset(PtrOff),
                                   false, false, NewAlign);

      AddToWorkList(NewPtr.getNode());
      AddToWorkList(NewLD.getNode());
      AddToWorkList(NewVal.getNode());
      WorkListRemover DeadNodes(*this);
      // Forward the old load's chain users onto the narrow load's chain.
      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
      ++OpsNarrowed;
      return NewST;
    }
  }

  return SDValue();
}

/// TransformFPLoadStorePair - For a given floating point load / store pair,
/// if the load value isn't used by any other operations, then consider
/// transforming the pair to integer load / store operations if the target
/// deems the transformation profitable.
7719218893SdimSDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { 7720218893Sdim StoreSDNode *ST = cast<StoreSDNode>(N); 7721218893Sdim SDValue Chain = ST->getChain(); 7722218893Sdim SDValue Value = ST->getValue(); 7723218893Sdim if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) && 7724218893Sdim Value.hasOneUse() && 7725218893Sdim Chain == SDValue(Value.getNode(), 1)) { 7726218893Sdim LoadSDNode *LD = cast<LoadSDNode>(Value); 7727218893Sdim EVT VT = LD->getMemoryVT(); 7728218893Sdim if (!VT.isFloatingPoint() || 7729218893Sdim VT != ST->getMemoryVT() || 7730218893Sdim LD->isNonTemporal() || 7731218893Sdim ST->isNonTemporal() || 7732218893Sdim LD->getPointerInfo().getAddrSpace() != 0 || 7733218893Sdim ST->getPointerInfo().getAddrSpace() != 0) 7734218893Sdim return SDValue(); 7735218893Sdim 7736218893Sdim EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 7737218893Sdim if (!TLI.isOperationLegal(ISD::LOAD, IntVT) || 7738218893Sdim !TLI.isOperationLegal(ISD::STORE, IntVT) || 7739218893Sdim !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) || 7740218893Sdim !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT)) 7741218893Sdim return SDValue(); 7742218893Sdim 7743218893Sdim unsigned LDAlign = LD->getAlignment(); 7744218893Sdim unsigned STAlign = ST->getAlignment(); 7745226633Sdim Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); 7746243830Sdim unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy); 7747218893Sdim if (LDAlign < ABIAlign || STAlign < ABIAlign) 7748218893Sdim return SDValue(); 7749218893Sdim 7750218893Sdim SDValue NewLD = DAG.getLoad(IntVT, Value.getDebugLoc(), 7751218893Sdim LD->getChain(), LD->getBasePtr(), 7752218893Sdim LD->getPointerInfo(), 7753234353Sdim false, false, false, LDAlign); 7754218893Sdim 7755218893Sdim SDValue NewST = DAG.getStore(NewLD.getValue(1), N->getDebugLoc(), 7756218893Sdim NewLD, ST->getBasePtr(), 7757218893Sdim ST->getPointerInfo(), 7758218893Sdim false, false, 
STAlign); 7759218893Sdim 7760218893Sdim AddToWorkList(NewLD.getNode()); 7761218893Sdim AddToWorkList(NewST.getNode()); 7762218893Sdim WorkListRemover DeadNodes(*this); 7763239462Sdim DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1)); 7764218893Sdim ++LdStFP2Int; 7765218893Sdim return NewST; 7766218893Sdim } 7767218893Sdim 7768218893Sdim return SDValue(); 7769218893Sdim} 7770218893Sdim 7771249423Sdim/// Helper struct to parse and store a memory address as base + index + offset. 7772249423Sdim/// We ignore sign extensions when it is safe to do so. 7773249423Sdim/// The following two expressions are not equivalent. To differentiate we need 7774249423Sdim/// to store whether there was a sign extension involved in the index 7775249423Sdim/// computation. 7776249423Sdim/// (load (i64 add (i64 copyfromreg %c) 7777249423Sdim/// (i64 signextend (add (i8 load %index) 7778249423Sdim/// (i8 1)))) 7779249423Sdim/// vs 7780249423Sdim/// 7781249423Sdim/// (load (i64 add (i64 copyfromreg %c) 7782249423Sdim/// (i64 signextend (i32 add (i32 signextend (i8 load %index)) 7783249423Sdim/// (i32 1))))) 7784249423Sdimstruct BaseIndexOffset { 7785249423Sdim SDValue Base; 7786249423Sdim SDValue Index; 7787249423Sdim int64_t Offset; 7788249423Sdim bool IsIndexSignExt; 7789249423Sdim 7790249423Sdim BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {} 7791249423Sdim 7792249423Sdim BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset, 7793249423Sdim bool IsIndexSignExt) : 7794249423Sdim Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {} 7795249423Sdim 7796249423Sdim bool equalBaseIndex(const BaseIndexOffset &Other) { 7797249423Sdim return Other.Base == Base && Other.Index == Index && 7798249423Sdim Other.IsIndexSignExt == IsIndexSignExt; 7799249423Sdim } 7800249423Sdim 7801249423Sdim /// Parses tree in Ptr for base, index, offset addresses. 
7802249423Sdim static BaseIndexOffset match(SDValue Ptr) { 7803249423Sdim bool IsIndexSignExt = false; 7804249423Sdim 7805249423Sdim // Just Base or possibly anything else. 7806249423Sdim if (Ptr->getOpcode() != ISD::ADD) 7807249423Sdim return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); 7808249423Sdim 7809249423Sdim // Base + offset. 7810249423Sdim if (isa<ConstantSDNode>(Ptr->getOperand(1))) { 7811249423Sdim int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); 7812249423Sdim return BaseIndexOffset(Ptr->getOperand(0), SDValue(), Offset, 7813249423Sdim IsIndexSignExt); 7814249423Sdim } 7815249423Sdim 7816249423Sdim // Look at Base + Index + Offset cases. 7817243830Sdim SDValue Base = Ptr->getOperand(0); 7818249423Sdim SDValue IndexOffset = Ptr->getOperand(1); 7819249423Sdim 7820249423Sdim // Skip signextends. 7821249423Sdim if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) { 7822249423Sdim IndexOffset = IndexOffset->getOperand(0); 7823249423Sdim IsIndexSignExt = true; 7824249423Sdim } 7825249423Sdim 7826249423Sdim // Either the case of Base + Index (no offset) or something else. 7827249423Sdim if (IndexOffset->getOpcode() != ISD::ADD) 7828249423Sdim return BaseIndexOffset(Base, IndexOffset, 0, IsIndexSignExt); 7829249423Sdim 7830249423Sdim // Now we have the case of Base + Index + offset. 7831249423Sdim SDValue Index = IndexOffset->getOperand(0); 7832249423Sdim SDValue Offset = IndexOffset->getOperand(1); 7833249423Sdim 7834249423Sdim if (!isa<ConstantSDNode>(Offset)) 7835249423Sdim return BaseIndexOffset(Ptr, SDValue(), 0, IsIndexSignExt); 7836249423Sdim 7837249423Sdim // Ignore signextends. 
7838249423Sdim if (Index->getOpcode() == ISD::SIGN_EXTEND) { 7839249423Sdim Index = Index->getOperand(0); 7840249423Sdim IsIndexSignExt = true; 7841249423Sdim } else IsIndexSignExt = false; 7842249423Sdim 7843249423Sdim int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue(); 7844249423Sdim return BaseIndexOffset(Base, Index, Off, IsIndexSignExt); 7845243830Sdim } 7846249423Sdim}; 7847243830Sdim 7848243830Sdim/// Holds a pointer to an LSBaseSDNode as well as information on where it 7849243830Sdim/// is located in a sequence of memory operations connected by a chain. 7850243830Sdimstruct MemOpLink { 7851243830Sdim MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq): 7852243830Sdim MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { } 7853243830Sdim // Ptr to the mem node. 7854243830Sdim LSBaseSDNode *MemNode; 7855243830Sdim // Offset from the base ptr. 7856243830Sdim int64_t OffsetFromBase; 7857243830Sdim // What is the sequence number of this mem node. 7858243830Sdim // Lowest mem operand in the DAG starts at zero. 7859243830Sdim unsigned SequenceNum; 7860243830Sdim}; 7861243830Sdim 7862243830Sdim/// Sorts store nodes in a link according to their offset from a shared 7863243830Sdim// base ptr. 7864243830Sdimstruct ConsecutiveMemoryChainSorter { 7865243830Sdim bool operator()(MemOpLink LHS, MemOpLink RHS) { 7866243830Sdim return LHS.OffsetFromBase < RHS.OffsetFromBase; 7867243830Sdim } 7868243830Sdim}; 7869243830Sdim 7870243830Sdimbool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { 7871243830Sdim EVT MemVT = St->getMemoryVT(); 7872243830Sdim int64_t ElementSizeBytes = MemVT.getSizeInBits()/8; 7873249423Sdim bool NoVectors = DAG.getMachineFunction().getFunction()->getAttributes(). 7874249423Sdim hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); 7875243830Sdim 7876243830Sdim // Don't merge vectors into wider inputs. 
7877243830Sdim if (MemVT.isVector() || !MemVT.isSimple()) 7878243830Sdim return false; 7879243830Sdim 7880243830Sdim // Perform an early exit check. Do not bother looking at stored values that 7881243830Sdim // are not constants or loads. 7882243830Sdim SDValue StoredVal = St->getValue(); 7883243830Sdim bool IsLoadSrc = isa<LoadSDNode>(StoredVal); 7884243830Sdim if (!isa<ConstantSDNode>(StoredVal) && !isa<ConstantFPSDNode>(StoredVal) && 7885243830Sdim !IsLoadSrc) 7886243830Sdim return false; 7887243830Sdim 7888243830Sdim // Only look at ends of store sequences. 7889243830Sdim SDValue Chain = SDValue(St, 1); 7890243830Sdim if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE) 7891243830Sdim return false; 7892243830Sdim 7893249423Sdim // This holds the base pointer, index, and the offset in bytes from the base 7894249423Sdim // pointer. 7895249423Sdim BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr()); 7896243830Sdim 7897243830Sdim // We must have a base and an offset. 7898249423Sdim if (!BasePtr.Base.getNode()) 7899243830Sdim return false; 7900243830Sdim 7901243830Sdim // Do not handle stores to undef base pointers. 7902249423Sdim if (BasePtr.Base.getOpcode() == ISD::UNDEF) 7903243830Sdim return false; 7904243830Sdim 7905249423Sdim // Save the LoadSDNodes that we find in the chain. 7906249423Sdim // We need to make sure that these nodes do not interfere with 7907249423Sdim // any of the store nodes. 7908249423Sdim SmallVector<LSBaseSDNode*, 8> AliasLoadNodes; 7909249423Sdim 7910249423Sdim // Save the StoreSDNodes that we find in the chain. 7911243830Sdim SmallVector<MemOpLink, 8> StoreNodes; 7912249423Sdim 7913243830Sdim // Walk up the chain and look for nodes with offsets from the same 7914243830Sdim // base pointer. Stop when reaching an instruction with a different kind 7915243830Sdim // or instruction which has a different base pointer. 
7916243830Sdim unsigned Seq = 0; 7917243830Sdim StoreSDNode *Index = St; 7918243830Sdim while (Index) { 7919243830Sdim // If the chain has more than one use, then we can't reorder the mem ops. 7920243830Sdim if (Index != St && !SDValue(Index, 1)->hasOneUse()) 7921243830Sdim break; 7922243830Sdim 7923243830Sdim // Find the base pointer and offset for this memory node. 7924249423Sdim BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr()); 7925243830Sdim 7926243830Sdim // Check that the base pointer is the same as the original one. 7927249423Sdim if (!Ptr.equalBaseIndex(BasePtr)) 7928243830Sdim break; 7929243830Sdim 7930243830Sdim // Check that the alignment is the same. 7931243830Sdim if (Index->getAlignment() != St->getAlignment()) 7932243830Sdim break; 7933243830Sdim 7934243830Sdim // The memory operands must not be volatile. 7935243830Sdim if (Index->isVolatile() || Index->isIndexed()) 7936243830Sdim break; 7937243830Sdim 7938243830Sdim // No truncation. 7939243830Sdim if (StoreSDNode *St = dyn_cast<StoreSDNode>(Index)) 7940243830Sdim if (St->isTruncatingStore()) 7941243830Sdim break; 7942243830Sdim 7943243830Sdim // The stored memory type must be the same. 7944243830Sdim if (Index->getMemoryVT() != MemVT) 7945243830Sdim break; 7946243830Sdim 7947243830Sdim // We do not allow unaligned stores because we want to prevent overriding 7948243830Sdim // stores. 7949243830Sdim if (Index->getAlignment()*8 != MemVT.getSizeInBits()) 7950243830Sdim break; 7951243830Sdim 7952243830Sdim // We found a potential memory operand to merge. 7953249423Sdim StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++)); 7954243830Sdim 7955249423Sdim // Find the next memory operand in the chain. If the next operand in the 7956249423Sdim // chain is a store then move up and continue the scan with the next 7957249423Sdim // memory operand. If the next operand is a load save it and use alias 7958249423Sdim // information to check if it interferes with anything. 
7959249423Sdim SDNode *NextInChain = Index->getChain().getNode(); 7960249423Sdim while (1) { 7961249423Sdim if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { 7962249423Sdim // We found a store node. Use it for the next iteration. 7963249423Sdim Index = STn; 7964249423Sdim break; 7965249423Sdim } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { 7966249423Sdim // Save the load node for later. Continue the scan. 7967249423Sdim AliasLoadNodes.push_back(Ldn); 7968249423Sdim NextInChain = Ldn->getChain().getNode(); 7969249423Sdim continue; 7970249423Sdim } else { 7971249423Sdim Index = NULL; 7972249423Sdim break; 7973249423Sdim } 7974249423Sdim } 7975243830Sdim } 7976243830Sdim 7977243830Sdim // Check if there is anything to merge. 7978243830Sdim if (StoreNodes.size() < 2) 7979243830Sdim return false; 7980243830Sdim 7981243830Sdim // Sort the memory operands according to their distance from the base pointer. 7982243830Sdim std::sort(StoreNodes.begin(), StoreNodes.end(), 7983243830Sdim ConsecutiveMemoryChainSorter()); 7984243830Sdim 7985243830Sdim // Scan the memory operations on the chain and find the first non-consecutive 7986243830Sdim // store memory address. 7987243830Sdim unsigned LastConsecutiveStore = 0; 7988243830Sdim int64_t StartAddress = StoreNodes[0].OffsetFromBase; 7989249423Sdim for (unsigned i = 0, e = StoreNodes.size(); i < e; ++i) { 7990249423Sdim 7991249423Sdim // Check that the addresses are consecutive starting from the second 7992249423Sdim // element in the list of stores. 7993249423Sdim if (i > 0) { 7994249423Sdim int64_t CurrAddress = StoreNodes[i].OffsetFromBase; 7995249423Sdim if (CurrAddress - StartAddress != (ElementSizeBytes * i)) 7996249423Sdim break; 7997249423Sdim } 7998249423Sdim 7999249423Sdim bool Alias = false; 8000249423Sdim // Check if this store interferes with any of the loads that we found. 
8001249423Sdim for (unsigned ld = 0, lde = AliasLoadNodes.size(); ld < lde; ++ld) 8002249423Sdim if (isAlias(AliasLoadNodes[ld], StoreNodes[i].MemNode)) { 8003249423Sdim Alias = true; 8004249423Sdim break; 8005249423Sdim } 8006249423Sdim // We found a load that alias with this store. Stop the sequence. 8007249423Sdim if (Alias) 8008243830Sdim break; 8009243830Sdim 8010243830Sdim // Mark this node as useful. 8011243830Sdim LastConsecutiveStore = i; 8012243830Sdim } 8013243830Sdim 8014243830Sdim // The node with the lowest store address. 8015243830Sdim LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; 8016243830Sdim 8017243830Sdim // Store the constants into memory as one consecutive store. 8018243830Sdim if (!IsLoadSrc) { 8019243830Sdim unsigned LastLegalType = 0; 8020243830Sdim unsigned LastLegalVectorType = 0; 8021243830Sdim bool NonZero = false; 8022243830Sdim for (unsigned i=0; i<LastConsecutiveStore+1; ++i) { 8023243830Sdim StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 8024243830Sdim SDValue StoredVal = St->getValue(); 8025243830Sdim 8026243830Sdim if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) { 8027243830Sdim NonZero |= !C->isNullValue(); 8028243830Sdim } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) { 8029243830Sdim NonZero |= !C->getConstantFPValue()->isNullValue(); 8030243830Sdim } else { 8031243830Sdim // Non constant. 8032243830Sdim break; 8033243830Sdim } 8034243830Sdim 8035243830Sdim // Find a legal type for the constant store. 8036243830Sdim unsigned StoreBW = (i+1) * ElementSizeBytes * 8; 8037243830Sdim EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); 8038243830Sdim if (TLI.isTypeLegal(StoreTy)) 8039243830Sdim LastLegalType = i+1; 8040249423Sdim // Or check whether a truncstore is legal. 
8041249423Sdim else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) == 8042249423Sdim TargetLowering::TypePromoteInteger) { 8043249423Sdim EVT LegalizedStoredValueTy = 8044249423Sdim TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType()); 8045249423Sdim if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy)) 8046249423Sdim LastLegalType = i+1; 8047249423Sdim } 8048243830Sdim 8049243830Sdim // Find a legal type for the vector store. 8050243830Sdim EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1); 8051243830Sdim if (TLI.isTypeLegal(Ty)) 8052243830Sdim LastLegalVectorType = i + 1; 8053243830Sdim } 8054243830Sdim 8055249423Sdim // We only use vectors if the constant is known to be zero and the 8056249423Sdim // function is not marked with the noimplicitfloat attribute. 8057249423Sdim if (NonZero || NoVectors) 8058243830Sdim LastLegalVectorType = 0; 8059243830Sdim 8060243830Sdim // Check if we found a legal integer type to store. 8061243830Sdim if (LastLegalType == 0 && LastLegalVectorType == 0) 8062243830Sdim return false; 8063243830Sdim 8064249423Sdim bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors; 8065243830Sdim unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType; 8066243830Sdim 8067243830Sdim // Make sure we have something to merge. 8068243830Sdim if (NumElem < 2) 8069243830Sdim return false; 8070243830Sdim 8071243830Sdim unsigned EarliestNodeUsed = 0; 8072243830Sdim for (unsigned i=0; i < NumElem; ++i) { 8073243830Sdim // Find a chain for the new wide-store operand. Notice that some 8074243830Sdim // of the store nodes that we found may not be selected for inclusion 8075243830Sdim // in the wide store. The chain we use needs to be the chain of the 8076243830Sdim // earliest store node which is *used* and replaced by the wide store. 
      // Pick the store with the largest SequenceNum: treated as the earliest
      // node in the DAG among the merged stores.  NOTE(review): presumably
      // SequenceNum counts backwards from the end of the chain -- confirm at
      // the point MemOpLink::SequenceNum is assigned (above this region).
      if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
        EarliestNodeUsed = i;
    }

    // The earliest Node in the DAG.
    LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
    DebugLoc DL = StoreNodes[0].MemNode->getDebugLoc();

    // Build the single wide value to store: either a wide vector constant or
    // one wide integer assembled from the narrow constant inputs.
    SDValue StoredVal;
    if (UseVector) {
      // Find a legal type for the vector store.
      EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
      assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
      // NOTE(review): the vector path stores an all-zero constant; presumably
      // UseVector is only set when every merged constant is zero -- confirm
      // where UseVector is computed (above this view).
      StoredVal = DAG.getConstant(0, Ty);
    } else {
      unsigned StoreBW = NumElem * ElementSizeBytes * 8;
      APInt StoreInt(StoreBW, 0);

      // Construct a single integer constant which is made of the smaller
      // constant inputs.
      bool IsLE = TLI.isLittleEndian();
      for (unsigned i = 0; i < NumElem ; ++i) {
        // On little-endian targets the lowest-addressed element must land in
        // the low bits of the wide integer, so walk the elements in reverse.
        unsigned Idx = IsLE ?(NumElem - 1 - i) : i;
        StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
        SDValue Val = St->getValue();
        // Shift the accumulated value up and OR in the next element's bits.
        StoreInt<<=ElementSizeBytes*8;
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
          StoreInt|=C->getAPIntValue().zext(StoreBW);
        } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
          // FP constants contribute their raw bit pattern.
          StoreInt|= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
        } else {
          assert(false && "Invalid constant element type");
        }
      }

      // Create the new Load and Store operations.
      EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
      StoredVal = DAG.getConstant(StoreInt, StoreTy);
    }

    // Emit the wide store on the earliest node's chain, at the address of the
    // first (lowest-addressed) store in the sequence.
    SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, StoredVal,
                                    FirstInChain->getBasePtr(),
                                    FirstInChain->getPointerInfo(),
                                    false, false,
                                    FirstInChain->getAlignment());

    // Replace the first store with the new store
    CombineTo(EarliestOp, NewStore);
    // Erase all other stores.
    for (unsigned i = 0; i < NumElem ; ++i) {
      if (StoreNodes[i].MemNode == EarliestOp)
        continue;
      StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
      // ReplaceAllUsesWith will replace all uses that existed when it was
      // called, but graph optimizations may cause new ones to appear. For
      // example, the case in pr14333 looks like
      //
      //  St's chain -> St -> another store -> X
      //
      // And the only difference from St to the other store is the chain.
      // When we change its chain to be St's chain they become identical,
      // get CSEed and the net result is that X is now a use of St.
      // Since we know that St is redundant, just iterate.
      while (!St->use_empty())
        DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain());
      removeFromWorkList(St);
      DAG.DeleteNode(St);
    }

    return true;
  }

  // Below we handle the case of multiple consecutive stores that
  // come from multiple consecutive loads. We merge them into a single
  // wide load and a single wide store.

  // Look for load nodes which are used by the stored values.
  SmallVector<MemOpLink, 8> LoadNodes;

  // Find acceptable loads. Loads need to have the same chain (token factor),
  // must not be zext, volatile, indexed, and they must be consecutive.
  BaseIndexOffset LdBasePtr;
  for (unsigned i=0; i<LastConsecutiveStore+1; ++i) {
    StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue());
    // Stop at the first stored value that is not a load: only a prefix of the
    // store sequence fed directly by loads can be merged.
    if (!Ld) break;

    // Loads must only have one use.
    if (!Ld->hasNUsesOfValue(1, 0))
      break;

    // Check that the alignment is the same as the stores.
    if (Ld->getAlignment() != St->getAlignment())
      break;

    // The memory operands must not be volatile.
    if (Ld->isVolatile() || Ld->isIndexed())
      break;

    // We do not accept ext loads.
    if (Ld->getExtensionType() != ISD::NON_EXTLOAD)
      break;

    // The stored memory type must be the same.
    if (Ld->getMemoryVT() != MemVT)
      break;

    BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr());
    // If this is not the first ptr that we check.
    if (LdBasePtr.Base.getNode()) {
      // The base ptr must be the same.
      if (!LdPtr.equalBaseIndex(LdBasePtr))
        break;
    } else {
      // Remember the first base pointer; all later loads must match it.
      LdBasePtr = LdPtr;
    }

    // We found a potential memory operand to merge.
    LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset, 0));
  }

  if (LoadNodes.size() < 2)
    return false;

  // Scan the memory operations on the chain and find the first non-consecutive
  // load memory address. These variables hold the index in the store node
  // array.
  unsigned LastConsecutiveLoad = 0;
  // This variable refers to the size and not index in the array.
  unsigned LastLegalVectorType = 0;
  unsigned LastLegalIntegerType = 0;
  StartAddress = LoadNodes[0].OffsetFromBase;
  SDValue FirstChain = LoadNodes[0].MemNode->getChain();
  for (unsigned i = 1; i < LoadNodes.size(); ++i) {
    // All loads must share the same chain.
    if (LoadNodes[i].MemNode->getChain() != FirstChain)
      break;

    // Loads must be laid out back-to-back, one element apart.
    int64_t CurrAddress = LoadNodes[i].OffsetFromBase;
    if (CurrAddress - StartAddress != (ElementSizeBytes * i))
      break;
    LastConsecutiveLoad = i;

    // Find a legal type for the vector store.
    EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
    if (TLI.isTypeLegal(StoreTy))
      LastLegalVectorType = i + 1;

    // Find a legal type for the integer store.
    unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
    StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
    if (TLI.isTypeLegal(StoreTy))
      LastLegalIntegerType = i + 1;
    // Or check whether a truncstore and extload is legal.
    else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
             TargetLowering::TypePromoteInteger) {
      EVT LegalizedStoredValueTy =
        TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy);
      // All three extension kinds must be legal for the promoted type since
      // the matching load's extension kind is not known here.
      if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
          TLI.isLoadExtLegal(ISD::ZEXTLOAD, StoreTy) &&
          TLI.isLoadExtLegal(ISD::SEXTLOAD, StoreTy) &&
          TLI.isLoadExtLegal(ISD::EXTLOAD, StoreTy))
        LastLegalIntegerType = i+1;
    }
  }

  // Only use vector types if the vector type is larger than the integer type.
  // If they are the same, use integers.
  bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors;
  unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType);

  // We add +1 here because the LastXXX variables refer to location while
  // the NumElem refers to array/index size.
  unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1;
  NumElem = std::min(LastLegalType, NumElem);

  // Merging fewer than two operations is not profitable.
  if (NumElem < 2)
    return false;

  // The earliest Node in the DAG.
  unsigned EarliestNodeUsed = 0;
  LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode;
  for (unsigned i=1; i<NumElem; ++i) {
    // Find a chain for the new wide-store operand. Notice that some
    // of the store nodes that we found may not be selected for inclusion
    // in the wide store. The chain we use needs to be the chain of the
    // earliest store node which is *used* and replaced by the wide store.
    if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum)
      EarliestNodeUsed = i;
  }

  // Find if it is better to use vectors or integers to load and store
  // to memory.
  EVT JointMemOpVT;
  if (UseVectorTy) {
    JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
  } else {
    unsigned StoreBW = NumElem * ElementSizeBytes * 8;
    JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
  }

  DebugLoc LoadDL = LoadNodes[0].MemNode->getDebugLoc();
  DebugLoc StoreDL = StoreNodes[0].MemNode->getDebugLoc();

  // Emit one wide load at the first load's address ...
  LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
  SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL,
                                FirstLoad->getChain(),
                                FirstLoad->getBasePtr(),
                                FirstLoad->getPointerInfo(),
                                false, false, false,
                                FirstLoad->getAlignment());

  // ... and one wide store of it at the first store's address, on the chain
  // of the earliest replaced store.
  SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad,
                                  FirstInChain->getBasePtr(),
                                  FirstInChain->getPointerInfo(), false, false,
                                  FirstInChain->getAlignment());

  // Replace one of the loads with the new load.
  LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[0].MemNode);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1),
                                SDValue(NewLoad.getNode(), 1));

  // Remove the rest of the load chains.
  for (unsigned i = 1; i < NumElem ; ++i) {
    // Replace all chain users of the old load nodes with the chain of the new
    // load node.
    LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain());
  }

  // Replace the first store with the new store.
  CombineTo(EarliestOp, NewStore);
  // Erase all other stores.
  for (unsigned i = 0; i < NumElem ; ++i) {
    // Remove all Store nodes.
    if (StoreNodes[i].MemNode == EarliestOp)
      continue;
    StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode);
    DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain());
    removeFromWorkList(St);
    DAG.DeleteNode(St);
  }

  return true;
}

/// visitSTORE - Combine patterns rooted at a STORE node: fold stores of
/// bitcasts, drop stores of undef, rewrite FP-constant stores as integer
/// stores, infer better alignment, form indexed stores, narrow truncating
/// stores, delete dead load/store pairs, and merge consecutive stores.
SDValue DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST  = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr   = ST->getBasePtr();

  // If this is a store of a bit convert, store the input value if the
  // resultant store does not need a higher alignment than the original.
  // Fold (store (bitcast x)) -> (store x) when the source type's ABI
  // alignment does not exceed the original store's alignment.
  if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
      ST->isUnindexed()) {
    unsigned OrigAlign = ST->getAlignment();
    EVT SVT = Value.getOperand(0).getValueType();
    unsigned Align = TLI.getDataLayout()->
      getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
    if (Align <= OrigAlign &&
        ((!LegalOperations && !ST->isVolatile()) ||
         TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
      return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
                          Ptr, ST->getPointerInfo(), ST->isVolatile(),
                          ST->isNonTemporal(), OrigAlign);
  }

  // Turn 'store undef, Ptr' -> nothing.
  if (Value.getOpcode() == ISD::UNDEF && ST->isUnindexed())
    return Chain;

  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
    // NOTE: If the original store is volatile, this transform must not increase
    // the number of stores.  For example, on x86-32 an f64 can be stored in one
    // processor operation but an i64 (which is not legal) requires two.  So the
    // transform should not be done in this case.
    if (Value.getOpcode() != ISD::TargetConstantFP) {
      SDValue Tmp;
      switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unknown FP type");
      case MVT::f16:    // We don't do this for these yet.
      case MVT::f80:
      case MVT::f128:
      case MVT::ppcf128:
        break;
      case MVT::f32:
        // Store the f32 bit pattern as a single i32.
        if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
          Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
                                bitcastToAPInt().getZExtValue(), MVT::i32);
          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
                              ST->isNonTemporal(), ST->getAlignment());
        }
        break;
      case MVT::f64:
        // Preferred form: one i64 store of the f64 bit pattern.
        if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
             !ST->isVolatile()) ||
            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
          Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                getZExtValue(), MVT::i64);
          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
                              ST->isNonTemporal(), ST->getAlignment());
        }

        if (!ST->isVolatile() &&
            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
          // Many FP stores are not made apparent until after legalize, e.g. for
          // argument passing.  Since this is so common, custom legalize the
          // 64-bit integer store into two 32-bit stores.
          uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
          SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
          SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
          // On big-endian targets the high half goes at the lower address.
          if (TLI.isBigEndian()) std::swap(Lo, Hi);

          unsigned Alignment = ST->getAlignment();
          bool isVolatile = ST->isVolatile();
          bool isNonTemporal = ST->isNonTemporal();

          SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
                                     Ptr, ST->getPointerInfo(),
                                     isVolatile, isNonTemporal,
                                     ST->getAlignment());
          // Second half lives 4 bytes further along; its alignment can be at
          // most 4 relative to the first store.
          Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
                            DAG.getConstant(4, Ptr.getValueType()));
          Alignment = MinAlign(Alignment, 4U);
          SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
                                     Ptr, ST->getPointerInfo().getWithOffset(4),
                                     isVolatile, isNonTemporal,
                                     Alignment);
          // Tie the two stores together so both stay live.
          return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
                             St0, St1);
        }

        break;
      }
    }
  }

  // Try to infer better alignment information than the store already has.
  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > ST->getAlignment())
        return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
                                 Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                                 ST->isVolatile(), ST->isNonTemporal(), Align);
    }
  }

  // Try transforming a pair floating point load / store ops to integer
  // load / store ops.
  SDValue NewST = TransformFPLoadStorePair(N);
  if (NewST.getNode())
    return NewST;

  if (CombinerAA) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDValue BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDValue ReplStore;

      // Replace the chain to avoid dependency.
      if (ST->isTruncatingStore()) {
        ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
                                      ST->getPointerInfo(),
                                      ST->getMemoryVT(), ST->isVolatile(),
                                      ST->isNonTemporal(), ST->getAlignment());
      } else {
        ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
                                 ST->getPointerInfo(),
                                 ST->isVolatile(), ST->isNonTemporal(),
                                 ST->getAlignment());
      }

      // Create token to keep both nodes around.
      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
                                  MVT::Other, Chain, ReplStore);

      // Make sure the new and old chains are cleaned up.
      AddToWorkList(Token.getNode());

      // Don't add users to work list.
      return CombineTo(N, Token, false);
    }
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  // FIXME: is there such a thing as a truncating indexed store?
  if (ST->isTruncatingStore() && ST->isUnindexed() &&
      Value.getValueType().isInteger()) {
    // See if we can simplify the input to this truncstore with knowledge that
    // only the low bits are being used.  For example:
    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
    SDValue Shorter =
      GetDemandedBits(Value,
                      APInt::getLowBitsSet(
                        Value.getValueType().getScalarType().getSizeInBits(),
                        ST->getMemoryVT().getScalarType().getSizeInBits()));
    AddToWorkList(Value.getNode());
    if (Shorter.getNode())
      return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
                               Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                               ST->isVolatile(), ST->isNonTemporal(),
                               ST->getAlignment());

    // Otherwise, see if we can simplify the operation with
    // SimplifyDemandedBits, which only works if the value has a single use.
    if (SimplifyDemandedBits(Value,
                             APInt::getLowBitsSet(
                               Value.getValueType().getScalarType().getSizeInBits(),
                               ST->getMemoryVT().getScalarType().getSizeInBits())))
      return SDValue(N, 0);
  }

  // If this is a load followed by a store to the same location, then the store
  // is dead/noop.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
        ST->isUnindexed() && !ST->isVolatile() &&
        // There can't be any side effects between the load and store, such as
        // a call or store.
        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
      // The store is dead, remove it.
      return Chain;
    }
  }

  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
  // truncating store.  We can do this even if this is already a truncstore.
  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            ST->getMemoryVT())) {
    return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
                             Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                             ST->isVolatile(), ST->isNonTemporal(),
                             ST->getAlignment());
  }

  // Only perform this optimization before the types are legal, because we
  // don't want to perform this optimization on every DAGCombine invocation.
  if (!LegalTypes) {
    bool EverChanged = false;

    do {
      // There can be multiple store sequences on the same chain.
      // Keep trying to merge store sequences until we are unable to do so
      // or until we merge the last store on the chain.
      bool Changed = MergeConsecutiveStores(ST);
      EverChanged |= Changed;
      if (!Changed) break;
      // Stop once the node itself has been deleted by a successful merge.
    } while (ST->getOpcode() != ISD::DELETED_NODE);

    if (EverChanged)
      return SDValue(N, 0);
  }

  return ReduceLoadOpStoreWidth(N);
}

/// visitINSERT_VECTOR_ELT - Fold an insert of a known element into a
/// BUILD_VECTOR (or UNDEF) operand by rebuilding the BUILD_VECTOR with the
/// inserted value in place.
SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
  SDValue InVec = N->getOperand(0);
  SDValue InVal = N->getOperand(1);
  SDValue EltNo = N->getOperand(2);
  DebugLoc dl = N->getDebugLoc();

  // If the inserted element is an UNDEF, just use the input vector.
  if (InVal.getOpcode() == ISD::UNDEF)
    return InVec;

  EVT VT = InVec.getValueType();

  // If we can't generate a legal BUILD_VECTOR, exit
  if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
    return SDValue();

  // Check that we know which element is being inserted
  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();
  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();

  // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
  // be converted to a BUILD_VECTOR).  Fill in the Ops vector with the
  // vector elements.
  SmallVector<SDValue, 8> Ops;
  if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
    // Start from the existing BUILD_VECTOR operands.
    Ops.append(InVec.getNode()->op_begin(),
               InVec.getNode()->op_end());
  } else if (InVec.getOpcode() == ISD::UNDEF) {
    // Treat UNDEF as a BUILD_VECTOR of all-undef elements.
    unsigned NElts = VT.getVectorNumElements();
    Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
  } else {
    return SDValue();
  }

  // Insert the element
  if (Elt < Ops.size()) {
    // All the operands of BUILD_VECTOR must have the same type;
    // we enforce that here.
    EVT OpVT = Ops[0].getValueType();
    if (InVal.getValueType() != OpVT)
      InVal = OpVT.bitsGT(InVal.getValueType()) ?
                DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
                DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
    Ops[Elt] = InVal;
  }

  // Return the new vector
  return DAG.getNode(ISD::BUILD_VECTOR, dl,
                     VT, &Ops[0], Ops.size());
}

/// visitEXTRACT_VECTOR_ELT - Fold extracts through SCALAR_TO_VECTOR and
/// VECTOR_SHUFFLE operands, and (after legalization) turn an extract of a
/// loaded vector element into a narrow scalar load.
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
  // (vextract (scalar_to_vector val, 0) -> val
  SDValue InVec = N->getOperand(0);
  EVT VT = InVec.getValueType();
  EVT NVT = N->getValueType(0);

  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    // Check if the result type doesn't match the inserted element type. A
    // SCALAR_TO_VECTOR may truncate the inserted element and the
    // EXTRACT_VECTOR_ELT may widen the extracted vector.
    SDValue InOp = InVec.getOperand(0);
    if (InOp.getValueType() != NVT) {
      assert(InOp.getValueType().isInteger() && NVT.isInteger());
      return DAG.getSExtOrTrunc(InOp, InVec.getDebugLoc(), NVT);
    }
    return InOp;
  }

  SDValue EltNo = N->getOperand(1);
  bool ConstEltNo = isa<ConstantSDNode>(EltNo);

  // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
  // We only perform this optimization before the op legalization phase because
  // we may introduce new vector instructions which are not backed by TD
  // patterns. For example on AVX, extracting elements from a wide vector
  // without using extract_subvector.
  if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
      && ConstEltNo && !LegalOperations) {
    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
    int NumElem = VT.getVectorNumElements();
    ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
    // Find the new index to extract from.
    int OrigElt = SVOp->getMaskElt(Elt);

    // Extracting an undef index is undef.
    if (OrigElt == -1)
      return DAG.getUNDEF(NVT);

    // Select the right vector half to extract from: mask indices >= NumElem
    // refer to the second shuffle operand.
    if (OrigElt < NumElem) {
      InVec = InVec->getOperand(0);
    } else {
      InVec = InVec->getOperand(1);
      OrigElt -= NumElem;
    }

    EVT IndexTy = N->getOperand(1).getValueType();
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, N->getDebugLoc(), NVT,
                       InVec, DAG.getConstant(OrigElt, IndexTy));
  }

  // Perform only after legalization to ensure build_vector / vector_shuffle
  // optimizations have already been done.
  if (!LegalOperations) return SDValue();

  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)

  if (ConstEltNo) {
    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
    bool NewLoad = false;
    bool BCNumEltsChanged = false;
    EVT ExtVT = VT.getVectorElementType();
    EVT LVT = ExtVT;

    // If the result of load has to be truncated, then it's not necessarily
    // profitable.
    if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
      return SDValue();

    if (InVec.getOpcode() == ISD::BITCAST) {
      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      EVT BCVT = InVec.getOperand(0).getValueType();
      if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
        return SDValue();
      if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
        BCNumEltsChanged = true;
      // Look through the bitcast to the underlying vector.
      InVec = InVec.getOperand(0);
      ExtVT = BCVT.getVectorElementType();
      NewLoad = true;
    }

    LoadSDNode *LN0 = NULL;
    const ShuffleVectorSDNode *SVN = NULL;
    if (ISD::isNormalLoad(InVec.getNode())) {
      LN0 = cast<LoadSDNode>(InVec);
    } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
               InVec.getOperand(0).getValueType() == ExtVT &&
               ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
    } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
      // =>
      // (load $addr+1*size)

      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      // If the bit convert changed the number of elements, it is unsafe
      // to examine the mask.
      if (BCNumEltsChanged)
        return SDValue();

      // Select the input vector, guarding against out of range extract vector.
      unsigned NumElems = VT.getVectorNumElements();
      // NOTE(review): this guard uses '>', which lets Elt == NumElems through
      // to getMaskElt(Elt) -- one past the mask.  Looks like it should be
      // '>='; confirm against ShuffleVectorSDNode::getMaskElt's bounds.
      int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
      InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);

      if (InVec.getOpcode() == ISD::BITCAST) {
        // Don't duplicate a load with other uses.
        if (!InVec.hasOneUse())
          return SDValue();

        InVec = InVec.getOperand(0);
      }
      if (ISD::isNormalLoad(InVec.getNode())) {
        LN0 = cast<LoadSDNode>(InVec);
        // Rebase the index into the selected half of the shuffle.
        Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
      }
    }

    // Make sure we found a non-volatile load and the extractelement is
    // the only use.
    if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
      return SDValue();

    // If Idx was -1 above, Elt is going to be -1, so just return undef.
    if (Elt == -1)
      return DAG.getUNDEF(LVT);

    unsigned Align = LN0->getAlignment();
    if (NewLoad) {
      // Check the resultant load doesn't need a higher alignment than the
      // original load.
      unsigned NewAlign =
        TLI.getDataLayout()
            ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));

      if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
        return SDValue();

      Align = NewAlign;
    }

    // Compute the address of the extracted element.
    SDValue NewPtr = LN0->getBasePtr();
    unsigned PtrOff = 0;

    if (Elt) {
      PtrOff = LVT.getSizeInBits() * Elt / 8;
      EVT PtrType = NewPtr.getValueType();
      // Big-endian layout puts element 0 at the highest offset.
      if (TLI.isBigEndian())
        PtrOff = VT.getSizeInBits() / 8 - PtrOff;
      NewPtr = DAG.getNode(ISD::ADD, N->getDebugLoc(), PtrType, NewPtr,
                           DAG.getConstant(PtrOff, PtrType));
    }

    // The replacement we need to do here is a little tricky: we need to
    // replace an extractelement of a load with a load.
    // Use ReplaceAllUsesOfValuesWith to do the replacement.
    // Note that this replacement assumes that the extractvalue is the only
    // use of the load; that's okay because we don't want to perform this
    // transformation in other cases anyway.
    SDValue Load;
    SDValue Chain;
    if (NVT.bitsGT(LVT)) {
      // If the result type of vextract is wider than the load, then issue an
      // extending load instead.
      ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, LVT)
        ? ISD::ZEXTLOAD : ISD::EXTLOAD;
      Load = DAG.getExtLoad(ExtType, N->getDebugLoc(), NVT, LN0->getChain(),
                            NewPtr, LN0->getPointerInfo().getWithOffset(PtrOff),
                            LVT, LN0->isVolatile(), LN0->isNonTemporal(),Align);
      Chain = Load.getValue(1);
    } else {
      Load = DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
                         LN0->getPointerInfo().getWithOffset(PtrOff),
                         LN0->isVolatile(), LN0->isNonTemporal(),
                         LN0->isInvariant(), Align);
      Chain = Load.getValue(1);
      // Narrow or reinterpret the loaded value to the extract's result type.
      if (NVT.bitsLT(LVT))
        Load = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), NVT, Load);
      else
        Load = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), NVT, Load);
    }
    WorkListRemover DeadNodes(*this);
    SDValue From[] = { SDValue(N, 0), SDValue(LN0,1) };
    SDValue To[] = { Load, Chain };
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
    // Since we're explicitly calling ReplaceAllUses, add the new node to the
    // worklist explicitly as well.
    AddToWorkList(Load.getNode());
    AddUsersToWorkList(Load.getNode()); // Add users too
    // Make sure to revisit this node to clean it up; it will usually be dead.
    AddToWorkList(N);
    return SDValue(N, 0);
  }

  return SDValue();
}

// Simplify (build_vec (ext )) to (bitcast (build_vec ))
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {
  // We perform this optimization post type-legalization because
  // the type-legalizer often scalarizes integer-promoted vectors.
8811243830Sdim // Performing this optimization before may create bit-casts which 8812243830Sdim // will be type-legalized to complex code sequences. 8813243830Sdim // We perform this optimization only before the operation legalizer because we 8814243830Sdim // may introduce illegal operations. 8815243830Sdim if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes) 8816243830Sdim return SDValue(); 8817243830Sdim 8818193323Sed unsigned NumInScalars = N->getNumOperands(); 8819234353Sdim DebugLoc dl = N->getDebugLoc(); 8820198090Srdivacky EVT VT = N->getValueType(0); 8821239462Sdim 8822234353Sdim // Check to see if this is a BUILD_VECTOR of a bunch of values 8823234353Sdim // which come from any_extend or zero_extend nodes. If so, we can create 8824234353Sdim // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR 8825234353Sdim // optimizations. We do not handle sign-extend because we can't fill the sign 8826234353Sdim // using shuffles. 8827234353Sdim EVT SourceType = MVT::Other; 8828234353Sdim bool AllAnyExt = true; 8829239462Sdim 8830234353Sdim for (unsigned i = 0; i != NumInScalars; ++i) { 8831234353Sdim SDValue In = N->getOperand(i); 8832234353Sdim // Ignore undef inputs. 8833234353Sdim if (In.getOpcode() == ISD::UNDEF) continue; 8834193323Sed 8835234353Sdim bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND; 8836234353Sdim bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND; 8837234353Sdim 8838234353Sdim // Abort if the element is not an extension. 8839234353Sdim if (!ZeroExt && !AnyExt) { 8840234353Sdim SourceType = MVT::Other; 8841234353Sdim break; 8842234353Sdim } 8843234353Sdim 8844234353Sdim // The input is a ZeroExt or AnyExt. Check the original type. 8845234353Sdim EVT InTy = In.getOperand(0).getValueType(); 8846234353Sdim 8847234353Sdim // Check that all of the widened source types are the same. 8848234353Sdim if (SourceType == MVT::Other) 8849234353Sdim // First time. 
8850234353Sdim SourceType = InTy; 8851234353Sdim else if (InTy != SourceType) { 8852234353Sdim // Multiple income types. Abort. 8853234353Sdim SourceType = MVT::Other; 8854234353Sdim break; 8855234353Sdim } 8856234353Sdim 8857234353Sdim // Check if all of the extends are ANY_EXTENDs. 8858234353Sdim AllAnyExt &= AnyExt; 8859234353Sdim } 8860234353Sdim 8861234353Sdim // In order to have valid types, all of the inputs must be extended from the 8862234353Sdim // same source type and all of the inputs must be any or zero extend. 8863234353Sdim // Scalar sizes must be a power of two. 8864243830Sdim EVT OutScalarTy = VT.getScalarType(); 8865234353Sdim bool ValidTypes = SourceType != MVT::Other && 8866234353Sdim isPowerOf2_32(OutScalarTy.getSizeInBits()) && 8867234353Sdim isPowerOf2_32(SourceType.getSizeInBits()); 8868234353Sdim 8869234353Sdim // Create a new simpler BUILD_VECTOR sequence which other optimizations can 8870234353Sdim // turn into a single shuffle instruction. 8871243830Sdim if (!ValidTypes) 8872243830Sdim return SDValue(); 8873234353Sdim 8874243830Sdim bool isLE = TLI.isLittleEndian(); 8875243830Sdim unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits(); 8876243830Sdim assert(ElemRatio > 1 && "Invalid element size ratio"); 8877243830Sdim SDValue Filler = AllAnyExt ? 
DAG.getUNDEF(SourceType): 8878243830Sdim DAG.getConstant(0, SourceType); 8879234353Sdim 8880243830Sdim unsigned NewBVElems = ElemRatio * VT.getVectorNumElements(); 8881243830Sdim SmallVector<SDValue, 8> Ops(NewBVElems, Filler); 8882234353Sdim 8883243830Sdim // Populate the new build_vector 8884243830Sdim for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 8885243830Sdim SDValue Cast = N->getOperand(i); 8886243830Sdim assert((Cast.getOpcode() == ISD::ANY_EXTEND || 8887243830Sdim Cast.getOpcode() == ISD::ZERO_EXTEND || 8888243830Sdim Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode"); 8889243830Sdim SDValue In; 8890243830Sdim if (Cast.getOpcode() == ISD::UNDEF) 8891243830Sdim In = DAG.getUNDEF(SourceType); 8892243830Sdim else 8893243830Sdim In = Cast->getOperand(0); 8894243830Sdim unsigned Index = isLE ? (i * ElemRatio) : 8895243830Sdim (i * ElemRatio + (ElemRatio - 1)); 8896243830Sdim 8897243830Sdim assert(Index < Ops.size() && "Invalid index"); 8898243830Sdim Ops[Index] = In; 8899243830Sdim } 8900243830Sdim 8901243830Sdim // The type of the new BUILD_VECTOR node. 8902243830Sdim EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems); 8903243830Sdim assert(VecVT.getSizeInBits() == VT.getSizeInBits() && 8904243830Sdim "Invalid vector size"); 8905243830Sdim // Check if the new vector type is legal. 8906243830Sdim if (!isTypeLegal(VecVT)) return SDValue(); 8907243830Sdim 8908243830Sdim // Make the new BUILD_VECTOR. 8909243830Sdim SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], Ops.size()); 8910243830Sdim 8911243830Sdim // The new BUILD_VECTOR node has the potential to be further optimized. 8912243830Sdim AddToWorkList(BV.getNode()); 8913243830Sdim // Bitcast to the desired type. 
8914243830Sdim return DAG.getNode(ISD::BITCAST, dl, VT, BV); 8915243830Sdim} 8916243830Sdim 8917243830SdimSDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) { 8918243830Sdim EVT VT = N->getValueType(0); 8919243830Sdim 8920243830Sdim unsigned NumInScalars = N->getNumOperands(); 8921243830Sdim DebugLoc dl = N->getDebugLoc(); 8922243830Sdim 8923243830Sdim EVT SrcVT = MVT::Other; 8924243830Sdim unsigned Opcode = ISD::DELETED_NODE; 8925243830Sdim unsigned NumDefs = 0; 8926243830Sdim 8927243830Sdim for (unsigned i = 0; i != NumInScalars; ++i) { 8928243830Sdim SDValue In = N->getOperand(i); 8929243830Sdim unsigned Opc = In.getOpcode(); 8930243830Sdim 8931243830Sdim if (Opc == ISD::UNDEF) 8932243830Sdim continue; 8933243830Sdim 8934243830Sdim // If all scalar values are floats and converted from integers. 8935243830Sdim if (Opcode == ISD::DELETED_NODE && 8936243830Sdim (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) { 8937243830Sdim Opcode = Opc; 8938234353Sdim } 8939249423Sdim 8940243830Sdim if (Opc != Opcode) 8941243830Sdim return SDValue(); 8942234353Sdim 8943243830Sdim EVT InVT = In.getOperand(0).getValueType(); 8944234353Sdim 8945243830Sdim // If all scalar values are typed differently, bail out. It's chosen to 8946243830Sdim // simplify BUILD_VECTOR of integer types. 8947243830Sdim if (SrcVT == MVT::Other) 8948243830Sdim SrcVT = InVT; 8949243830Sdim if (SrcVT != InVT) 8950243830Sdim return SDValue(); 8951243830Sdim NumDefs++; 8952243830Sdim } 8953234353Sdim 8954243830Sdim // If the vector has just one element defined, it's not worth to fold it into 8955243830Sdim // a vectorized one. 
  if (NumDefs < 2)
    return SDValue();

  assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP)
         && "Should only handle conversion from integer to float.");
  assert(SrcVT != MVT::Other && "Cannot determine source type!");

  // The integer vector we will convert from; the conversion itself must be
  // legal (or custom) on that type.
  EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars);

  if (!TLI.isOperationLegalOrCustom(Opcode, NVT))
    return SDValue();

  // Gather the integer sources, keeping undef lanes undef.
  SmallVector<SDValue, 8> Opnds;
  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);

    if (In.getOpcode() == ISD::UNDEF)
      Opnds.push_back(DAG.getUNDEF(SrcVT));
    else
      Opnds.push_back(In.getOperand(0));
  }
  SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT,
                           &Opnds[0], Opnds.size());
  AddToWorkList(BV.getNode());

  // One vector-wide conversion replaces N scalar conversions.
  return DAG.getNode(Opcode, dl, VT, BV);
}

/// Combine BUILD_VECTOR: all-undef folding, the two reduceBuildVec* rewrites
/// above, and BUILD_VECTOR-of-extracts -> VECTOR_SHUFFLE.
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
  unsigned NumInScalars = N->getNumOperands();
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  // A vector built entirely of undefs is undef.
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  SDValue V = reduceBuildVecExtToExtBuildVec(N);
  if (V.getNode())
    return V;

  V = reduceBuildVecConvertToConvertBuildVec(N);
  if (V.getNode())
    return V;

  // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
  // operations.  If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
  // at most two distinct vectors, turn this into a shuffle node.

  // May only combine to shuffle after legalize if shuffle is legal.
  if (LegalOperations &&
      !TLI.isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))
    return SDValue();

  SDValue VecIn1, VecIn2;
  for (unsigned i = 0; i != NumInScalars; ++i) {
    // Ignore undef inputs.
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;

    // If this input is something other than a EXTRACT_VECTOR_ELT with a
    // constant index, bail out.
    if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
      VecIn1 = VecIn2 = SDValue(0, 0);
      break;
    }

    // We allow up to two distinct input vectors.
    SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
    if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
      continue;

    if (VecIn1.getNode() == 0) {
      VecIn1 = ExtractedFromVec;
    } else if (VecIn2.getNode() == 0) {
      VecIn2 = ExtractedFromVec;
    } else {
      // Too many inputs.
      VecIn1 = VecIn2 = SDValue(0, 0);
      break;
    }
  }

  // If everything is good, we can make a shuffle operation.
  if (VecIn1.getNode()) {
    SmallVector<int, 8> Mask;
    for (unsigned i = 0; i != NumInScalars; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
        Mask.push_back(-1);
        continue;
      }

      // If extracting from the first vector, just use the index directly.
      SDValue Extract = N->getOperand(i);
      SDValue ExtVal = Extract.getOperand(1);
      if (Extract.getOperand(0) == VecIn1) {
        unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
        // NOTE(review): an index equal to getVectorNumElements() passes this
        // check but is out of range for VecIn1 (it would alias element 0 of
        // the second shuffle input) — presumably this should be '>='; confirm.
        if (ExtIndex > VT.getVectorNumElements())
          return SDValue();

        Mask.push_back(ExtIndex);
        continue;
      }

      // Otherwise, use InIdx + VecSize
      unsigned Idx = cast<ConstantSDNode>(ExtVal)->getZExtValue();
      Mask.push_back(Idx+NumInScalars);
    }

    // We can't generate a shuffle node with mismatched input and output types.
    // Attempt to transform a single input vector to the correct type.
    if ((VT != VecIn1.getValueType())) {
      // We don't support shuffling between TWO values of different types.
      if (VecIn2.getNode() != 0)
        return SDValue();

      // We only support widening of vectors which are half the size of the
      // output registers. For example XMM->YMM widening on X86 with AVX.
      if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits())
        return SDValue();

      // If the input vector type has a different base type to the output
      // vector type, bail out.
      if (VecIn1.getValueType().getVectorElementType() !=
          VT.getVectorElementType())
        return SDValue();

      // Widen the input vector by adding undef values.
      VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                           VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
    }

    // If VecIn2 is unused then change it to undef.
    VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);

    // Check that we were able to transform all incoming values to the same
    // type.
    if (VecIn2.getValueType() != VecIn1.getValueType() ||
        VecIn1.getValueType() != VT)
      return SDValue();

    // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes.
    if (!isTypeLegal(VT))
      return SDValue();

    // Return the new VECTOR_SHUFFLE node.
    SDValue Ops[2];
    Ops[0] = VecIn1;
    Ops[1] = VecIn2;
    return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], &Mask[0]);
  }

  return SDValue();
}

/// Combine CONCAT_VECTORS: trivial one-operand concat, all-undef folding, and
/// recognition of an identity concat of extract_subvectors of one source.
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
  // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
  // EXTRACT_SUBVECTOR operations.  If so, and if the EXTRACT_SUBVECTOR vector
  // inputs come from at most two distinct vectors, turn this into a shuffle
  // node.

  // If we only have one input vector, we don't need to do any concatenation.
  if (N->getNumOperands() == 1)
    return N->getOperand(0);

  // Check if all of the operands are undefs.
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(N->getValueType(0));

  // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
  // nodes often generate nop CONCAT_VECTOR nodes.
  // Scan the CONCAT_VECTOR operands and look for CONCAT operations that
  // place the incoming vectors at the exact same location.
  SDValue SingleSource = SDValue();
  unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.getOpcode() == ISD::UNDEF)
      continue;

    // Check if this is the identity extract:
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // Find the single incoming vector for the extract_subvector.
    if (SingleSource.getNode()) {
      if (Op.getOperand(0) != SingleSource)
        return SDValue();
    } else {
      SingleSource = Op.getOperand(0);

      // Check the source type is the same as the type of the result.
      // If not, this concat may extend the vector, so we can not
      // optimize it away.
      if (SingleSource.getValueType() != N->getValueType(0))
        return SDValue();
    }

    unsigned IdentityIndex = i * PartNumElem;
    ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    // The extract index must be constant.
    if (!CS)
      return SDValue();

    // Check that we are reading from the identity index.
    if (CS->getZExtValue() != IdentityIndex)
      return SDValue();
  }

  // Every defined part read its own slice of SingleSource, so the concat is
  // a no-op.
  if (SingleSource.getNode())
    return SingleSource;

  return SDValue();
}

/// Combine EXTRACT_SUBVECTOR: fold extracts of CONCAT_VECTORS and of
/// INSERT_SUBVECTOR when the extracted piece can be identified directly.
SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
  EVT NVT = N->getValueType(0);
  SDValue V = N->getOperand(0);

  if (V->getOpcode() == ISD::CONCAT_VECTORS) {
    // Combine:
    //    (extract_subvec (concat V1, V2, ...), i)
    // Into:
    //    Vi if possible
    // Only operand 0 is checked as 'concat' assumes all inputs of the same type.
    if (V->getOperand(0).getValueType() != NVT)
      return SDValue();
    // NOTE(review): dyn_cast result is dereferenced without a null check —
    // this assumes the extract index is always a ConstantSDNode here; cast<>
    // would document that assumption. Confirm against how this node is built.
    unsigned Idx = dyn_cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    unsigned NumElems = NVT.getVectorNumElements();
    assert((Idx % NumElems) == 0 &&
           "IDX in concat is not a multiple of the result vector length.");
    return V->getOperand(Idx / NumElems);
  }

  // Skip bitcasting
  if (V->getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V->getOpcode() == ISD::INSERT_SUBVECTOR) {
    DebugLoc dl = N->getDebugLoc();
    // Handle only simple case where vector being inserted and vector
    // being extracted are of same type, and are half size of larger vectors.
    EVT BigVT = V->getOperand(0).getValueType();
    EVT SmallVT = V->getOperand(1).getValueType();
    if (!NVT.bitsEq(SmallVT) || NVT.getSizeInBits()*2 != BigVT.getSizeInBits())
      return SDValue();

    // Only handle cases where both indexes are constants with the same type.
    ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
    ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));

    if (InsIdx && ExtIdx &&
        InsIdx->getValueType(0).getSizeInBits() <= 64 &&
        ExtIdx->getValueType(0).getSizeInBits() <= 64) {
      // Combine:
      //    (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
      // Into:
      //    indices are equal or bit offsets are equal => V1
      //    otherwise => (extract_subvec V1, ExtIdx)
      // Indices are compared as bit offsets because a bitcast may have been
      // skipped above, so the two index spaces can use different element
      // sizes.
      if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
          ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
        return DAG.getNode(ISD::BITCAST, dl, NVT, V->getOperand(1));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NVT,
                         DAG.getNode(ISD::BITCAST, dl,
                                     N->getOperand(0).getValueType(),
                                     V->getOperand(0)), N->getOperand(1));
    }
  }

  return SDValue();
}

// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  SmallVector<SDValue, 4> Ops;
  EVT ConcatVT = N0.getOperand(0).getValueType();
  unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements();
  unsigned NumConcats = NumElts / NumElemsPerConcat;

  // Look at every vector that's inserted. We're looking for exact
  // subvector-sized copies from a concatenated vector
  for (unsigned I = 0; I != NumConcats; ++I) {
    // Make sure we're dealing with a copy.
    unsigned Begin = I * NumElemsPerConcat;
    // The first mask element of this chunk must be subvector-aligned ...
    if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0)
      return SDValue();

    // ... and the rest of the chunk must be consecutive, i.e. the chunk copies
    // one whole concat operand unchanged.
    for (unsigned J = 1; J != NumElemsPerConcat; ++J) {
      if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J)) {
        return SDValue();
      }
    }

    // Select the concat operand the chunk came from (from N0 or, for mask
    // values past N0's elements, from N1).
    unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat;
    if (FirstElt < N0.getNumOperands())
      Ops.push_back(N0.getOperand(FirstElt));
    else
      Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands()));
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, Ops.data(),
                     Ops.size());
}

/// Combine VECTOR_SHUFFLE: canonicalizations involving undef operands,
/// splat-of-build_vector folding, shuffle-of-concats partitioning, and
/// collapsing a shuffle that exactly reverses a previous shuffle.
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");

  // Canonicalize shuffle undef, undef -> undef
  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(VT);

  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  // Canonicalize shuffle v, v -> v, undef
  if (N0 == N1) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      // Both operands are the same vector, so indices into the second operand
      // can be remapped into the first.
      if (Idx >= (int)NumElts) Idx -= NumElts;
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, N->getDebugLoc(), N0, DAG.getUNDEF(VT),
                                &NewMask[0]);
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N0.getOpcode() == ISD::UNDEF) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= 0) {
        if (Idx < (int)NumElts)
          Idx += NumElts;
        else
          Idx -= NumElts;
      }
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, N->getDebugLoc(), N1, DAG.getUNDEF(VT),
                                &NewMask[0]);
  }

  // Remove references to rhs if it is undef
  if (N1.getOpcode() == ISD::UNDEF) {
    bool Changed = false;
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) {
        Idx = -1;
        Changed = true;
      }
      NewMask.push_back(Idx);
    }
    if (Changed)
      return DAG.getVectorShuffle(VT, N->getDebugLoc(), N0, N1, &NewMask[0]);
  }

  // If it is a splat, check if the argument vector is another splat or a
  // build_vector with all scalar elements the same.
  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
    SDNode *V = N0.getNode();

    // If this is a bit convert that changes the element type of the vector but
    // not the number of vector elements, look through it.  Be careful not to
    // look through conversions that change things like v4f32 to v2f64.
    if (V->getOpcode() == ISD::BITCAST) {
      SDValue ConvInput = V->getOperand(0);
      if (ConvInput.getValueType().isVector() &&
          ConvInput.getValueType().getVectorNumElements() == NumElts)
        V = ConvInput.getNode();
    }

    if (V->getOpcode() == ISD::BUILD_VECTOR) {
      assert(V->getNumOperands() == NumElts &&
             "BUILD_VECTOR has wrong number of operands");
      SDValue Base;
      bool AllSame = true;
      // Find the first non-undef element to use as the reference value.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
          Base = V->getOperand(i);
          break;
        }
      }
      // Splat of <u, u, u, u>, return <u, u, u, u>
      if (!Base.getNode())
        return N0;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i) != Base) {
          AllSame = false;
          break;
        }
      }
      // Splat of <x, x, x, x>, return <x, x, x, x>
      if (AllSame)
        return N0;
    }
  }

  // A shuffle whose mask copies whole operands of two (compatible) concats can
  // be rebuilt as a single concat of the selected pieces.
  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
      Level < AfterLegalizeVectorOps &&
      (N1.getOpcode() == ISD::UNDEF ||
      (N1.getOpcode() == ISD::CONCAT_VECTORS &&
       N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
    SDValue V = partitionShuffleOfConcats(N, DAG);

    if (V.getNode())
      return V;
  }

  // If this shuffle node is simply a swizzle of another shuffle node,
  // and it reverses the swizzle of the previous shuffle then we can
  // optimize shuffle(shuffle(x, undef), undef) -> x.
  if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
      N1.getOpcode() == ISD::UNDEF) {

    ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);

    // Shuffle nodes can only reverse shuffles with a single non-undef value.
    if (N0.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      assert(Idx < (int)NumElts && "Index references undef operand");
      // Next, this index comes from the first value, which is the incoming
      // shuffle. Adopt the incoming index.
      if (Idx >= 0)
        Idx = OtherSV->getMaskElt(Idx);

      // The combined shuffle must map each index to itself.
      if (Idx >= 0 && (unsigned)Idx != i)
        return SDValue();
    }

    return OtherSV->getOperand(0);
  }

  return SDValue();
}

/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to transform
/// an AND to a vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (N->getOpcode() == ISD::AND) {
    if (RHS.getOpcode() == ISD::BITCAST)
      RHS = RHS.getOperand(0);
    if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<int, 8> Indices;
      unsigned NumElts = RHS.getNumOperands();
      // Each mask element must be all-ones (keep lane i of LHS) or all-zeros
      // (take lane 0 of the zero vector, i.e. index NumElts).
      for (unsigned i = 0; i != NumElts; ++i) {
        SDValue Elt = RHS.getOperand(i);
        if (!isa<ConstantSDNode>(Elt))
          return SDValue();

        if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
          Indices.push_back(i);
        else if (cast<ConstantSDNode>(Elt)->isNullValue())
          Indices.push_back(NumElts);
        else
          return SDValue();
      }

      // Let's see if the target supports this vector_shuffle.
      EVT RVT = RHS.getValueType();
      if (!TLI.isVectorClearMaskLegal(Indices, RVT))
        return SDValue();

      // Return the new VECTOR_SHUFFLE node.
9450198090Srdivacky EVT EltVT = RVT.getVectorElementType(); 9451193323Sed SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(), 9452198090Srdivacky DAG.getConstant(0, EltVT)); 9453193323Sed SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 9454193323Sed RVT, &ZeroOps[0], ZeroOps.size()); 9455218893Sdim LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS); 9456193323Sed SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]); 9457218893Sdim return DAG.getNode(ISD::BITCAST, dl, VT, Shuf); 9458193323Sed } 9459193323Sed } 9460193323Sed 9461193323Sed return SDValue(); 9462193323Sed} 9463193323Sed 9464193323Sed/// SimplifyVBinOp - Visit a binary vector operation, like ADD. 9465193323SedSDValue DAGCombiner::SimplifyVBinOp(SDNode *N) { 9466218893Sdim assert(N->getValueType(0).isVector() && 9467218893Sdim "SimplifyVBinOp only works on vectors!"); 9468193323Sed 9469193323Sed SDValue LHS = N->getOperand(0); 9470193323Sed SDValue RHS = N->getOperand(1); 9471193323Sed SDValue Shuffle = XformToShuffleWithZero(N); 9472193323Sed if (Shuffle.getNode()) return Shuffle; 9473193323Sed 9474193323Sed // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold 9475193323Sed // this operation. 9476193323Sed if (LHS.getOpcode() == ISD::BUILD_VECTOR && 9477193323Sed RHS.getOpcode() == ISD::BUILD_VECTOR) { 9478193323Sed SmallVector<SDValue, 8> Ops; 9479193323Sed for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) { 9480193323Sed SDValue LHSOp = LHS.getOperand(i); 9481193323Sed SDValue RHSOp = RHS.getOperand(i); 9482193323Sed // If these two elements can't be folded, bail out. 
9483193323Sed if ((LHSOp.getOpcode() != ISD::UNDEF && 9484193323Sed LHSOp.getOpcode() != ISD::Constant && 9485193323Sed LHSOp.getOpcode() != ISD::ConstantFP) || 9486193323Sed (RHSOp.getOpcode() != ISD::UNDEF && 9487193323Sed RHSOp.getOpcode() != ISD::Constant && 9488193323Sed RHSOp.getOpcode() != ISD::ConstantFP)) 9489193323Sed break; 9490193323Sed 9491193323Sed // Can't fold divide by zero. 9492193323Sed if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV || 9493193323Sed N->getOpcode() == ISD::FDIV) { 9494193323Sed if ((RHSOp.getOpcode() == ISD::Constant && 9495193323Sed cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) || 9496193323Sed (RHSOp.getOpcode() == ISD::ConstantFP && 9497193323Sed cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero())) 9498193323Sed break; 9499193323Sed } 9500193323Sed 9501218893Sdim EVT VT = LHSOp.getValueType(); 9502234353Sdim EVT RVT = RHSOp.getValueType(); 9503234353Sdim if (RVT != VT) { 9504234353Sdim // Integer BUILD_VECTOR operands may have types larger than the element 9505234353Sdim // size (e.g., when the element type is not legal). Prior to type 9506234353Sdim // legalization, the types may not match between the two BUILD_VECTORS. 9507234353Sdim // Truncate one of the operands to make them match. 
        // RHS element is wider: truncate it down to the LHS element type.
        if (RVT.getSizeInBits() > VT.getSizeInBits()) {
          RHSOp = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, RHSOp);
        } else {
          // Otherwise truncate the LHS element and fold in the narrower type.
          LHSOp = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), RVT, LHSOp);
          VT = RVT;
        }
      }
      // Constant fold the two scalar operands through getNode.
      SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), VT,
                                   LHSOp, RHSOp);
      // If the fold did not collapse to a constant (or undef), we cannot
      // build a constant BUILD_VECTOR, so abandon the whole transform.
      if (FoldOp.getOpcode() != ISD::UNDEF &&
          FoldOp.getOpcode() != ISD::Constant &&
          FoldOp.getOpcode() != ISD::ConstantFP)
        break;
      Ops.push_back(FoldOp);
      AddToWorkList(FoldOp.getNode());
    }

    // Only rebuild the vector if every element folded successfully.
    if (Ops.size() == LHS.getNumOperands())
      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
                         LHS.getValueType(), &Ops[0], Ops.size());
  }

  return SDValue();
}

/// SimplifyVUnaryOp - Visit a unary vector operation, like FABS/FNEG, and
/// try to constant fold it elementwise.
SDValue DAGCombiner::SimplifyVUnaryOp(SDNode *N) {
  assert(N->getValueType(0).isVector() &&
         "SimplifyVUnaryOp only works on vectors!");

  SDValue N0 = N->getOperand(0);

  // The only folding done here is elementwise constant folding, so the
  // operand must be a vector built directly with BUILD_VECTOR.
  if (N0.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Operand is a BUILD_VECTOR node, see if we can constant fold it.
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) {
    SDValue Op = N0.getOperand(i);
    // Only UNDEF and FP-constant elements are foldable; any other element
    // aborts the transform.
    if (Op.getOpcode() != ISD::UNDEF &&
        Op.getOpcode() != ISD::ConstantFP)
      break;
    EVT EltVT = Op.getValueType();
    // Apply the operation to the scalar element; getNode constant folds
    // UNDEF/ConstantFP operands where it can.
    SDValue FoldOp = DAG.getNode(N->getOpcode(), N0.getDebugLoc(), EltVT, Op);
    if (FoldOp.getOpcode() != ISD::UNDEF &&
        FoldOp.getOpcode() != ISD::ConstantFP)
      break;
    Ops.push_back(FoldOp);
    AddToWorkList(FoldOp.getNode());
  }

  // Bail out unless every element folded to a constant or undef.
  if (Ops.size() != N0.getNumOperands())
    return SDValue();

  return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
                     N0.getValueType(), &Ops[0], Ops.size());
}

/// SimplifySelect - Given a SELECT of a SETCC, try to simplify it via
/// SimplifySelectCC, then decompose any SELECT_CC result back into
/// SETCC + SELECT since the caller had a SELECT node.
SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
                                    SDValue N1, SDValue N2){
  assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");

  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());

  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then return
  // the SELECT node, since we were called with a SELECT node.
  if (SCC.getNode()) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      // Rebuild the comparison as an explicit SETCC...
      SDValue SETCC = DAG.getNode(ISD::SETCC, N0.getDebugLoc(),
                                  N0.getValueType(),
                                  SCC.getOperand(0), SCC.getOperand(1),
                                  SCC.getOperand(4));
      AddToWorkList(SETCC.getNode());
      // ...and select between the two value operands with it.
      return DAG.getNode(ISD::SELECT, SCC.getDebugLoc(), SCC.getValueType(),
                         SCC.getOperand(2), SCC.getOperand(3), SETCC);
    }

    return SCC;
  }
  return SDValue();
}

/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
/// are the two values being selected between, see if we can simplify the
/// select.  Callers of this should assume that TheSelect is deleted if this
/// returns true.  As such, they should return the appropriate thing (e.g. the
/// node) back to the top-level of the DAG combiner loop to avoid it being
/// looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {

  // Cannot simplify select with vector condition
  if (TheSelect->getOperand(0).getValueType().isVector()) return false;

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() != RHS.getOpcode() ||
      !LHS.hasOneUse() || !RHS.hasOneUse())
    return false;

  // If this is a load and the token chain is identical, replace the select
  // of two loads with a load through a select of the address to load from.
  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
  // constants have been dropped into the constant pool.
  if (LHS.getOpcode() == ISD::LOAD) {
    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
    LoadSDNode *RLD = cast<LoadSDNode>(RHS);

    // Token chains must be identical.
    if (LHS.getOperand(0) != RHS.getOperand(0) ||
        // Do not let this transformation reduce the number of volatile loads.
        LLD->isVolatile() || RLD->isVolatile() ||
        // If this is an EXTLOAD, the VT's must match.
        LLD->getMemoryVT() != RLD->getMemoryVT() ||
        // If this is an EXTLOAD, the kind of extension must match.
        (LLD->getExtensionType() != RLD->getExtensionType() &&
         // The only exception is if one of the extensions is anyext.
         LLD->getExtensionType() != ISD::EXTLOAD &&
         RLD->getExtensionType() != ISD::EXTLOAD) ||
        // FIXME: this discards src value information.  This is
        // over-conservative. It would be beneficial to be able to remember
        // both potential memory locations.  Since we are discarding
        // src value info, don't do the transformation if the memory
        // locations are not in the default address space.
        LLD->getPointerInfo().getAddrSpace() != 0 ||
        RLD->getPointerInfo().getAddrSpace() != 0 ||
        // The select's opcode must be legal for the address type since we
        // emit a SELECT/SELECT_CC of the two base pointers below.
        !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
                                      LLD->getBasePtr().getValueType()))
      return false;

    // Check that the select condition doesn't reach either load.  If so,
    // folding this will induce a cycle into the DAG.  If not, this is safe to
    // xform, so create a select of the addresses.
    SDValue Addr;
    if (TheSelect->getOpcode() == ISD::SELECT) {
      SDNode *CondNode = TheSelect->getOperand(0).getNode();
      // If either load's chain result is live and the load feeds the
      // condition, folding would create a cycle.
      if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
          (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
        return false;
      // The loads must not depend on one another.
      if (LLD->isPredecessorOf(RLD) ||
          RLD->isPredecessorOf(LLD))
        return false;
      Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0), LLD->getBasePtr(),
                         RLD->getBasePtr());
    } else {  // Otherwise SELECT_CC
      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
      SDNode *CondRHS = TheSelect->getOperand(1).getNode();

      // Same cycle check as above, but against both comparison operands.
      if ((LLD->hasAnyUseOfValue(1) &&
           (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
          (RLD->hasAnyUseOfValue(1) &&
           (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
        return false;

      Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0),
                         TheSelect->getOperand(1),
                         LLD->getBasePtr(), RLD->getBasePtr(),
                         TheSelect->getOperand(4));
    }

    // Build the replacement load from the selected address.
    SDValue Load;
    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
      Load = DAG.getLoad(TheSelect->getValueType(0),
                         TheSelect->getDebugLoc(),
                         // FIXME: Discards pointer info.
                         LLD->getChain(), Addr, MachinePointerInfo(),
                         LLD->isVolatile(), LLD->isNonTemporal(),
                         LLD->isInvariant(), LLD->getAlignment());
    } else {
      // If one side is an anyext, use the other side's extension kind (the
      // legality of mixing them was checked above).
      Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
                            RLD->getExtensionType() : LLD->getExtensionType(),
                            TheSelect->getDebugLoc(),
                            TheSelect->getValueType(0),
                            // FIXME: Discards pointer info.
                            LLD->getChain(), Addr, MachinePointerInfo(),
                            LLD->getMemoryVT(), LLD->isVolatile(),
                            LLD->isNonTemporal(), LLD->getAlignment());
    }

    // Users of the select now use the result of the load.
    CombineTo(TheSelect, Load);

    // Users of the old loads now use the new load's chain. We know the
    // old-load value is dead now.
    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
    return true;
  }

  return false;
}

/// SimplifySelectCC - Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
                                      SDValue N2, SDValue N3,
                                      ISD::CondCode CC, bool NotExtCompare) {
  // (x ? y : y) -> y.
  if (N2 == N3) return N2;

  EVT VT = N2.getValueType();
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());

  // Determine if the condition we're dealing with is constant
  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
                              N0, N1, CC, DL, false);
  if (SCC.getNode()) AddToWorkList(SCC.getNode());
  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());

  // fold select_cc true, x, y -> x
  if (SCCC && !SCCC->isNullValue())
    return N2;
  // fold select_cc false, x, y -> y
  if (SCCC && SCCC->isNullValue())
    return N3;

  // Check to see if we can simplify the select into an fabs node
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
    // Allow either -0.0 or 0.0
    if (CFP->getValueAPF().isZero()) {
      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
          N2 == N3.getOperand(0))
        return DAG.getNode(ISD::FABS, DL, VT, N0);

      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
          N0 == N3 && N2.getOpcode() == ISD::FNEG &&
          N2.getOperand(0) == N3)
        return DAG.getNode(ISD::FABS, DL, VT, N3);
    }
  }

  // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
  // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
  // in it. This is a win when the constant is not otherwise available because
  // it replaces two constant pool loads with one. We only do this if the FP
  // type is known to be legal, because if it isn't, then we are before legalize
  // types and we want the other legalization to happen first (e.g. to avoid
  // messing with soft float) and if the ConstantFP is not legal, because if
  // it is legal, we may not need to store the FP constant in a constant pool.
  if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2))
    if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) {
      if (TLI.isTypeLegal(N2.getValueType()) &&
          (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) !=
           TargetLowering::Legal) &&
          // If both constants have multiple uses, then we won't need to do an
          // extra load, they are likely around in registers for other users.
          (TV->hasOneUse() || FV->hasOneUse())) {
        // Array layout is {false-value, true-value} so the select below can
        // index it with the boolean condition directly.
        Constant *Elts[] = {
          const_cast<ConstantFP*>(FV->getConstantFPValue()),
          const_cast<ConstantFP*>(TV->getConstantFPValue())
        };
        Type *FPTy = Elts[0]->getType();
        const DataLayout &TD = *TLI.getDataLayout();

        // Create a ConstantArray of the two constants.
        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
        SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
                                            TD.getPrefTypeAlignment(FPTy));
        unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();

        // Get the offsets to the 0 and 1 element of the array so that we can
        // select between them.
        SDValue Zero = DAG.getIntPtrConstant(0);
        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
        SDValue One = DAG.getIntPtrConstant(EltSize);

        SDValue Cond = DAG.getSetCC(DL,
                                    TLI.getSetCCResultType(N0.getValueType()),
                                    N0, N1, CC);
        AddToWorkList(Cond.getNode());
        SDValue CstOffset = DAG.getNode(ISD::SELECT, DL, Zero.getValueType(),
                                        Cond, One, Zero);
        AddToWorkList(CstOffset.getNode());
        CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx,
                            CstOffset);
        AddToWorkList(CPIdx.getNode());
        return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
                           MachinePointerInfo::getConstantPool(), false,
                           false, false, Alignment);

      }
    }

  // Check to see if we can perform the "gzip trick", transforming
  // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
  if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
      (N1C->isNullValue() ||                         // (a < 0) ? b : 0
       (N1C->getAPIntValue() == 1 && N0 == N2))) {   // (a < 1) ? a : 0
    EVT XType = N0.getValueType();
    EVT AType = N2.getValueType();
    if (XType.bitsGE(AType)) {
      // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a
      // single-bit constant.
      if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
        // Shift the sign bit of X down into the position of A's single bit.
        unsigned ShCtV = N2C->getAPIntValue().logBase2();
        ShCtV = XType.getSizeInBits()-ShCtV-1;
        SDValue ShCt = DAG.getConstant(ShCtV,
                                       getShiftAmountTy(N0.getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(),
                                    XType, N0, ShCt);
        AddToWorkList(Shift.getNode());

        if (XType.bitsGT(AType)) {
          Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
          AddToWorkList(Shift.getNode());
        }

        return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
      }

      // General case: broadcast the sign bit with an arithmetic shift, then
      // mask A with the resulting all-ones/all-zeros value.
      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(),
                                  XType, N0,
                                  DAG.getConstant(XType.getSizeInBits()-1,
                                         getShiftAmountTy(N0.getValueType())));
      AddToWorkList(Shift.getNode());

      if (XType.bitsGT(AType)) {
        Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift);
        AddToWorkList(Shift.getNode());
      }

      return DAG.getNode(ISD::AND, DL, AType, Shift, N2);
    }
  }

  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
  // where y has a single bit set.
  // A plaintext description would be, we can turn the SELECT_CC into an AND
  // when the condition can be materialized as an all-ones register.  Any
  // single bit-test can be materialized as an all-ones register with
  // shift-left and shift-right-arith.
  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
      N0->getValueType(0) == VT &&
      N1C && N1C->isNullValue() &&
      N2C && N2C->isNullValue()) {
    SDValue AndLHS = N0->getOperand(0);
    ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
      // Shift the tested bit over the sign bit.
      APInt AndMask = ConstAndRHS->getAPIntValue();
      SDValue ShlAmt =
        DAG.getConstant(AndMask.countLeadingZeros(),
                        getShiftAmountTy(AndLHS.getValueType()));
      SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);

      // Now arithmetic right shift it all the way over, so the result is either
      // all-ones, or zero.
      SDValue ShrAmt =
        DAG.getConstant(AndMask.getBitWidth()-1,
                        getShiftAmountTy(Shl.getValueType()));
      SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);

      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
    }
  }

  // fold select C, 16, 0 -> shl C, 4
  if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
      TLI.getBooleanContents(N0.getValueType().isVector()) ==
      TargetLowering::ZeroOrOneBooleanContent) {

    // If the caller doesn't want us to simplify this into a zext of a compare,
    // don't do it.
    if (NotExtCompare && N2C->getAPIntValue() == 1)
      return SDValue();

    // Get a SetCC of the condition
    // NOTE: Don't create a SETCC if it's not legal on this target.
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SETCC,
          LegalTypes ? TLI.getSetCCResultType(N0.getValueType()) : MVT::i1)) {
      SDValue Temp, SCC;
      // cast from setcc result type to select result type
      if (LegalTypes) {
        SCC = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()),
                           N0, N1, CC);
        if (N2.getValueType().bitsLT(SCC.getValueType()))
          Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(),
                                        N2.getValueType());
        else
          Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
                             N2.getValueType(), SCC);
      } else {
        // Before type legalization the setcc result is always an i1.
        SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
        Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
                           N2.getValueType(), SCC);
      }

      AddToWorkList(SCC.getNode());
      AddToWorkList(Temp.getNode());

      if (N2C->getAPIntValue() == 1)
        return Temp;

      // shl setcc result by log2 n2c
      return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp,
                         DAG.getConstant(N2C->getAPIntValue().logBase2(),
                                         getShiftAmountTy(Temp.getValueType())));
    }
  }

  // Check to see if this is the equivalent of setcc
  // FIXME: Turn all of these into setcc if setcc is legal
  // otherwise, go ahead with the folds.
  // NOTE: this whole block is currently disabled by the leading `0 &&`.
  if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
    EVT XType = N0.getValueType();
    if (!LegalOperations ||
        TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(XType))) {
      SDValue Res = DAG.getSetCC(DL, TLI.getSetCCResultType(XType), N0, N1, CC);
      if (Res.getValueType() != VT)
        Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res);
      return Res;
    }

    // fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
    if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
        (!LegalOperations ||
         TLI.isOperationLegal(ISD::CTLZ, XType))) {
      SDValue Ctlz = DAG.getNode(ISD::CTLZ, N0.getDebugLoc(), XType, N0);
      return DAG.getNode(ISD::SRL, DL, XType, Ctlz,
                         DAG.getConstant(Log2_32(XType.getSizeInBits()),
                                         getShiftAmountTy(Ctlz.getValueType())));
    }
    // fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
    if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
      SDValue NegN0 = DAG.getNode(ISD::SUB, N0.getDebugLoc(),
                                  XType, DAG.getConstant(0, XType), N0);
      SDValue NotN0 = DAG.getNOT(N0.getDebugLoc(), N0, XType);
      return DAG.getNode(ISD::SRL, DL, XType,
                         DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0),
                         DAG.getConstant(XType.getSizeInBits()-1,
                                         getShiftAmountTy(XType)));
    }
    // fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
    if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
      SDValue Sign = DAG.getNode(ISD::SRL, N0.getDebugLoc(), XType, N0,
                                 DAG.getConstant(XType.getSizeInBits()-1,
                                         getShiftAmountTy(N0.getValueType())));
      return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType));
    }
  }

  // Check to see if this is an integer abs.
  // select_cc setg[te] X,  0, X, -X ->
  // select_cc setgt    X, -1, X, -X ->
  // select_cc setl[te] X,  0, -X, X ->
  // select_cc setlt    X,  1, -X, X ->
  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
  if (N1C) {
    ConstantSDNode *SubC = NULL;
    if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
         (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
        N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
    else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
              (N1C->isOne() && CC == ISD::SETLT)) &&
             N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
      SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));

    EVT XType = N0.getValueType();
    // The negation must be a subtraction from zero for the abs idiom to hold.
    if (SubC && SubC->isNullValue() && XType.isInteger()) {
      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
                                  N0,
                                  DAG.getConstant(XType.getSizeInBits()-1,
                                         getShiftAmountTy(N0.getValueType())));
      SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
                                XType, N0, Shift);
      AddToWorkList(Shift.getNode());
      AddToWorkList(Add.getNode());
      return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
    }
  }

  return SDValue();
}

/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
9999198090SrdivackySDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, 10000193323Sed SDValue N1, ISD::CondCode Cond, 10001193323Sed DebugLoc DL, bool foldBooleans) { 10002193323Sed TargetLowering::DAGCombinerInfo 10003249423Sdim DagCombineInfo(DAG, Level, false, this); 10004193323Sed return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL); 10005193323Sed} 10006193323Sed 10007193323Sed/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant, 10008193323Sed/// return a DAG expression to select that will generate the same value by 10009193323Sed/// multiplying by a magic number. See: 10010193323Sed/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 10011193323SedSDValue DAGCombiner::BuildSDIV(SDNode *N) { 10012193323Sed std::vector<SDNode*> Built; 10013234353Sdim SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, &Built); 10014193323Sed 10015193323Sed for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end(); 10016193323Sed ii != ee; ++ii) 10017193323Sed AddToWorkList(*ii); 10018193323Sed return S; 10019193323Sed} 10020193323Sed 10021193323Sed/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant, 10022193323Sed/// return a DAG expression to select that will generate the same value by 10023193323Sed/// multiplying by a magic number. See: 10024193323Sed/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 10025193323SedSDValue DAGCombiner::BuildUDIV(SDNode *N) { 10026193323Sed std::vector<SDNode*> Built; 10027234353Sdim SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, &Built); 10028193323Sed 10029193323Sed for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end(); 10030193323Sed ii != ee; ++ii) 10031193323Sed AddToWorkList(*ii); 10032193323Sed return S; 10033193323Sed} 10034193323Sed 10035198090Srdivacky/// FindBaseOffset - Return true if base is a frame index, which is known not 10036218893Sdim// to alias with anything but itself. 
// Provides base object and offset as results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
                           const GlobalValue *&GV, const void *&CV) {
  // Assume it is a primitive operation.
  Base = Ptr; Offset = 0; GV = 0; CV = 0;

  // If it's adding a simple constant then integrate the offset.
  if (Base.getOpcode() == ISD::ADD) {
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) {
      Base = Base.getOperand(0);
      Offset += C->getZExtValue();
    }
  }

  // Return the underlying GlobalValue, and update the Offset.  Return false
  // for GlobalAddressSDNode since the same GlobalAddress may be represented
  // by multiple nodes with different offsets.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) {
    GV = G->getGlobal();
    Offset += G->getOffset();
    return false;
  }

  // Return the underlying Constant value, and update the Offset.  Return false
  // for ConstantSDNodes since the same constant pool entry may be represented
  // by multiple nodes with different offsets.
  if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) {
    CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal()
                                         : (const void *)C->getConstVal();
    Offset += C->getOffset();
    return false;
  }
  // If it's any of the following then it can't alias with anything but itself.
  return isa<FrameIndexSDNode>(Base);
}

/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
                          const Value *SrcValue1, int SrcValueOffset1,
                          unsigned SrcValueAlign1,
                          const MDNode *TBAAInfo1,
                          SDValue Ptr2, int64_t Size2,
                          const Value *SrcValue2, int SrcValueOffset2,
                          unsigned SrcValueAlign2,
                          const MDNode *TBAAInfo2) const {
  // If they are the same then they must be aliases.
  if (Ptr1 == Ptr2) return true;

  // Gather base node and offset information.
  SDValue Base1, Base2;
  int64_t Offset1, Offset2;
  const GlobalValue *GV1, *GV2;
  const void *CV1, *CV2;
  bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
  bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);

  // If they have a same base address then check to see if they overlap.
  if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);

  // It is possible for different frame indices to alias each other, mostly
  // when tail call optimization reuses return address slots for arguments.
  // To catch this case, look up the actual index of frame indices to compute
  // the real alias relationship.
  if (isFrameIndex1 && isFrameIndex2) {
    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
    Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
    Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
  }

  // Otherwise, if we know what the bases are, and they aren't identical, then
  // we know they cannot alias.
  if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
    return false;

  // If we know required SrcValue1 and SrcValue2 have relatively large alignment
  // compared to the size and offset of the access, we may be able to prove they
  // do not alias.  This check is conservative for now to catch cases created by
  // splitting vector types.
  if ((SrcValueAlign1 == SrcValueAlign2) &&
      (SrcValueOffset1 != SrcValueOffset2) &&
      (Size1 == Size2) && (SrcValueAlign1 > Size1)) {
    int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
    int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;

    // There is no overlap between these relatively aligned accesses of similar
    // size, return no alias.
    if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
      return false;
  }

  if (CombinerGlobalAA) {
    // Use alias analysis information.
    int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
    int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
    int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
    AliasAnalysis::AliasResult AAResult =
      AA.alias(AliasAnalysis::Location(SrcValue1, Overlap1, TBAAInfo1),
               AliasAnalysis::Location(SrcValue2, Overlap2, TBAAInfo2));
    if (AAResult == AliasAnalysis::NoAlias)
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// isAlias - Convenience overload: gather the alias-relevant fields from two
/// load/store nodes and defer to the main isAlias predicate above.
bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) {
  SDValue Ptr0, Ptr1;
  int64_t Size0, Size1;
  const Value *SrcValue0, *SrcValue1;
  int SrcValueOffset0, SrcValueOffset1;
  unsigned SrcValueAlign0, SrcValueAlign1;
  const MDNode *SrcTBAAInfo0, *SrcTBAAInfo1;
  FindAliasInfo(Op0, Ptr0, Size0, SrcValue0, SrcValueOffset0,
                SrcValueAlign0, SrcTBAAInfo0);
  FindAliasInfo(Op1, Ptr1, Size1, SrcValue1, SrcValueOffset1,
                SrcValueAlign1, SrcTBAAInfo1);
  return isAlias(Ptr0, Size0, SrcValue0, SrcValueOffset0,
                 SrcValueAlign0, SrcTBAAInfo0,
                 Ptr1, Size1, SrcValue1, SrcValueOffset1,
                 SrcValueAlign1, SrcTBAAInfo1);
}

/// FindAliasInfo - Extracts the relevant alias information from the memory
/// node.  Returns true if the operand was a load.
10165193323Sedbool DAGCombiner::FindAliasInfo(SDNode *N, 10166234353Sdim SDValue &Ptr, int64_t &Size, 10167234353Sdim const Value *&SrcValue, 10168234353Sdim int &SrcValueOffset, 10169234353Sdim unsigned &SrcValueAlign, 10170234353Sdim const MDNode *&TBAAInfo) const { 10171234353Sdim LSBaseSDNode *LS = cast<LSBaseSDNode>(N); 10172234353Sdim 10173234353Sdim Ptr = LS->getBasePtr(); 10174234353Sdim Size = LS->getMemoryVT().getSizeInBits() >> 3; 10175234353Sdim SrcValue = LS->getSrcValue(); 10176234353Sdim SrcValueOffset = LS->getSrcValueOffset(); 10177234353Sdim SrcValueAlign = LS->getOriginalAlignment(); 10178234353Sdim TBAAInfo = LS->getTBAAInfo(); 10179234353Sdim return isa<LoadSDNode>(LS); 10180193323Sed} 10181193323Sed 10182193323Sed/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes, 10183193323Sed/// looking for aliasing nodes and adding them to the Aliases vector. 10184193323Sedvoid DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain, 10185193323Sed SmallVector<SDValue, 8> &Aliases) { 10186193323Sed SmallVector<SDValue, 8> Chains; // List of chains to visit. 10187198090Srdivacky SmallPtrSet<SDNode *, 16> Visited; // Visited node set. 10188193323Sed 10189193323Sed // Get alias information for node. 10190193323Sed SDValue Ptr; 10191198090Srdivacky int64_t Size; 10192198090Srdivacky const Value *SrcValue; 10193198090Srdivacky int SrcValueOffset; 10194198090Srdivacky unsigned SrcValueAlign; 10195218893Sdim const MDNode *SrcTBAAInfo; 10196218893Sdim bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset, 10197218893Sdim SrcValueAlign, SrcTBAAInfo); 10198193323Sed 10199193323Sed // Starting off. 10200193323Sed Chains.push_back(OriginalChain); 10201198090Srdivacky unsigned Depth = 0; 10202218893Sdim 10203193323Sed // Look at each chain and determine if it is an alias. If so, add it to the 10204193323Sed // aliases list. If not, then continue up the chain looking for the next 10205193323Sed // candidate. 
10206193323Sed while (!Chains.empty()) { 10207193323Sed SDValue Chain = Chains.back(); 10208193323Sed Chains.pop_back(); 10209218893Sdim 10210218893Sdim // For TokenFactor nodes, look at each operand and only continue up the 10211218893Sdim // chain until we find two aliases. If we've seen two aliases, assume we'll 10212198090Srdivacky // find more and revert to original chain since the xform is unlikely to be 10213198090Srdivacky // profitable. 10214218893Sdim // 10215218893Sdim // FIXME: The depth check could be made to return the last non-aliasing 10216198090Srdivacky // chain we found before we hit a tokenfactor rather than the original 10217198090Srdivacky // chain. 10218198090Srdivacky if (Depth > 6 || Aliases.size() == 2) { 10219198090Srdivacky Aliases.clear(); 10220198090Srdivacky Aliases.push_back(OriginalChain); 10221198090Srdivacky break; 10222198090Srdivacky } 10223193323Sed 10224198090Srdivacky // Don't bother if we've been before. 10225198090Srdivacky if (!Visited.insert(Chain.getNode())) 10226198090Srdivacky continue; 10227193323Sed 10228193323Sed switch (Chain.getOpcode()) { 10229193323Sed case ISD::EntryToken: 10230193323Sed // Entry token is ideal chain operand, but handled in FindBetterChain. 10231193323Sed break; 10232193323Sed 10233193323Sed case ISD::LOAD: 10234193323Sed case ISD::STORE: { 10235193323Sed // Get alias information for Chain. 10236193323Sed SDValue OpPtr; 10237198090Srdivacky int64_t OpSize; 10238198090Srdivacky const Value *OpSrcValue; 10239198090Srdivacky int OpSrcValueOffset; 10240198090Srdivacky unsigned OpSrcValueAlign; 10241218893Sdim const MDNode *OpSrcTBAAInfo; 10242193323Sed bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize, 10243198090Srdivacky OpSrcValue, OpSrcValueOffset, 10244218893Sdim OpSrcValueAlign, 10245218893Sdim OpSrcTBAAInfo); 10246193323Sed 10247193323Sed // If chain is alias then stop here. 
10248193323Sed if (!(IsLoad && IsOpLoad) && 10249198090Srdivacky isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign, 10250218893Sdim SrcTBAAInfo, 10251198090Srdivacky OpPtr, OpSize, OpSrcValue, OpSrcValueOffset, 10252218893Sdim OpSrcValueAlign, OpSrcTBAAInfo)) { 10253193323Sed Aliases.push_back(Chain); 10254193323Sed } else { 10255193323Sed // Look further up the chain. 10256193323Sed Chains.push_back(Chain.getOperand(0)); 10257198090Srdivacky ++Depth; 10258193323Sed } 10259193323Sed break; 10260193323Sed } 10261193323Sed 10262193323Sed case ISD::TokenFactor: 10263198090Srdivacky // We have to check each of the operands of the token factor for "small" 10264198090Srdivacky // token factors, so we queue them up. Adding the operands to the queue 10265198090Srdivacky // (stack) in reverse order maintains the original order and increases the 10266198090Srdivacky // likelihood that getNode will find a matching token factor (CSE.) 10267198090Srdivacky if (Chain.getNumOperands() > 16) { 10268198090Srdivacky Aliases.push_back(Chain); 10269198090Srdivacky break; 10270198090Srdivacky } 10271193323Sed for (unsigned n = Chain.getNumOperands(); n;) 10272193323Sed Chains.push_back(Chain.getOperand(--n)); 10273198090Srdivacky ++Depth; 10274193323Sed break; 10275193323Sed 10276193323Sed default: 10277193323Sed // For all other instructions we will just have to take what we can get. 10278193323Sed Aliases.push_back(Chain); 10279193323Sed break; 10280193323Sed } 10281193323Sed } 10282193323Sed} 10283193323Sed 10284193323Sed/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking 10285193323Sed/// for a better chain (aliasing node.) 10286193323SedSDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) { 10287193323Sed SmallVector<SDValue, 8> Aliases; // Ops for replacing token factor. 10288193323Sed 10289193323Sed // Accumulate all the aliases to this node. 
10290193323Sed GatherAllAliases(N, OldChain, Aliases); 10291193323Sed 10292223017Sdim // If no operands then chain to entry token. 10293223017Sdim if (Aliases.size() == 0) 10294193323Sed return DAG.getEntryNode(); 10295223017Sdim 10296223017Sdim // If a single operand then chain to it. We don't need to revisit it. 10297223017Sdim if (Aliases.size() == 1) 10298193323Sed return Aliases[0]; 10299218893Sdim 10300193323Sed // Construct a custom tailored token factor. 10301218893Sdim return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other, 10302198090Srdivacky &Aliases[0], Aliases.size()); 10303193323Sed} 10304193323Sed 10305193323Sed// SelectionDAG::Combine - This is the entry point for the file. 10306193323Sed// 10307193323Sedvoid SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA, 10308193323Sed CodeGenOpt::Level OptLevel) { 10309193323Sed /// run - This is the main entry point to this class. 10310193323Sed /// 10311193323Sed DAGCombiner(*this, AA, OptLevel).Run(Level); 10312193323Sed} 10313