1193323Sed//===- Reassociate.cpp - Reassociate binary expressions -------------------===// 2193323Sed// 3193323Sed// The LLVM Compiler Infrastructure 4193323Sed// 5193323Sed// This file is distributed under the University of Illinois Open Source 6193323Sed// License. See LICENSE.TXT for details. 7193323Sed// 8193323Sed//===----------------------------------------------------------------------===// 9193323Sed// 10193323Sed// This pass reassociates commutative expressions in an order that is designed 11201360Srdivacky// to promote better constant propagation, GCSE, LICM, PRE, etc. 12193323Sed// 13193323Sed// For example: 4 + (x + 5) -> x + (4 + 5) 14193323Sed// 15193323Sed// In the implementation of this algorithm, constants are assigned rank = 0, 16193323Sed// function arguments are rank = 1, and other values are assigned ranks 17193323Sed// corresponding to the reverse post order traversal of current function 18193323Sed// (starting at 2), which effectively gives values in deep loops higher rank 19193323Sed// than values not in loops. 
20193323Sed// 21193323Sed//===----------------------------------------------------------------------===// 22193323Sed 23193323Sed#define DEBUG_TYPE "reassociate" 24193323Sed#include "llvm/Transforms/Scalar.h" 25239462Sdim#include "llvm/ADT/DenseMap.h" 26239462Sdim#include "llvm/ADT/PostOrderIterator.h" 27239462Sdim#include "llvm/ADT/STLExtras.h" 28239462Sdim#include "llvm/ADT/SetVector.h" 29239462Sdim#include "llvm/ADT/Statistic.h" 30193323Sed#include "llvm/Assembly/Writer.h" 31249423Sdim#include "llvm/IR/Constants.h" 32249423Sdim#include "llvm/IR/DerivedTypes.h" 33249423Sdim#include "llvm/IR/Function.h" 34249423Sdim#include "llvm/IR/IRBuilder.h" 35249423Sdim#include "llvm/IR/Instructions.h" 36249423Sdim#include "llvm/IR/IntrinsicInst.h" 37249423Sdim#include "llvm/Pass.h" 38193323Sed#include "llvm/Support/CFG.h" 39193323Sed#include "llvm/Support/Debug.h" 40193323Sed#include "llvm/Support/ValueHandle.h" 41198090Srdivacky#include "llvm/Support/raw_ostream.h" 42249423Sdim#include "llvm/Transforms/Utils/Local.h" 43193323Sed#include <algorithm> 44193323Sedusing namespace llvm; 45193323Sed 46193323SedSTATISTIC(NumChanged, "Number of insts reassociated"); 47193323SedSTATISTIC(NumAnnihil, "Number of expr tree annihilated"); 48193323SedSTATISTIC(NumFactor , "Number of multiplies factored"); 49193323Sed 50193323Sednamespace { 51198090Srdivacky struct ValueEntry { 52193323Sed unsigned Rank; 53193323Sed Value *Op; 54193323Sed ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {} 55193323Sed }; 56193323Sed inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) { 57193323Sed return LHS.Rank > RHS.Rank; // Sort so that highest rank goes to start. 58193323Sed } 59193323Sed} 60193323Sed 61193323Sed#ifndef NDEBUG 62193323Sed/// PrintOps - Print out the expression identified in the Ops list. 
63193323Sed/// 64201360Srdivackystatic void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) { 65193323Sed Module *M = I->getParent()->getParent()->getParent(); 66202375Srdivacky dbgs() << Instruction::getOpcodeName(I->getOpcode()) << " " 67201360Srdivacky << *Ops[0].Op->getType() << '\t'; 68193323Sed for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 69202375Srdivacky dbgs() << "[ "; 70202375Srdivacky WriteAsOperand(dbgs(), Ops[i].Op, false, M); 71202375Srdivacky dbgs() << ", #" << Ops[i].Rank << "] "; 72193323Sed } 73193323Sed} 74193323Sed#endif 75239462Sdim 76193323Sednamespace { 77239462Sdim /// \brief Utility class representing a base and exponent pair which form one 78239462Sdim /// factor of some product. 79239462Sdim struct Factor { 80239462Sdim Value *Base; 81239462Sdim unsigned Power; 82239462Sdim 83239462Sdim Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {} 84239462Sdim 85239462Sdim /// \brief Sort factors by their Base. 86239462Sdim struct BaseSorter { 87239462Sdim bool operator()(const Factor &LHS, const Factor &RHS) { 88239462Sdim return LHS.Base < RHS.Base; 89239462Sdim } 90239462Sdim }; 91239462Sdim 92239462Sdim /// \brief Compare factors for equal bases. 93239462Sdim struct BaseEqual { 94239462Sdim bool operator()(const Factor &LHS, const Factor &RHS) { 95239462Sdim return LHS.Base == RHS.Base; 96239462Sdim } 97239462Sdim }; 98239462Sdim 99239462Sdim /// \brief Sort factors in descending order by their power. 100239462Sdim struct PowerDescendingSorter { 101239462Sdim bool operator()(const Factor &LHS, const Factor &RHS) { 102239462Sdim return LHS.Power > RHS.Power; 103239462Sdim } 104239462Sdim }; 105239462Sdim 106239462Sdim /// \brief Compare factors for equal powers. 
107239462Sdim struct PowerEqual { 108239462Sdim bool operator()(const Factor &LHS, const Factor &RHS) { 109239462Sdim return LHS.Power == RHS.Power; 110239462Sdim } 111239462Sdim }; 112239462Sdim }; 113249423Sdim 114249423Sdim /// Utility class representing a non-constant Xor-operand. We classify 115249423Sdim /// non-constant Xor-Operands into two categories: 116249423Sdim /// C1) The operand is in the form "X & C", where C is a constant and C != ~0 117249423Sdim /// C2) 118249423Sdim /// C2.1) The operand is in the form of "X | C", where C is a non-zero 119249423Sdim /// constant. 120249423Sdim /// C2.2) Any operand E which doesn't fall into C1 and C2.1, we view this 121249423Sdim /// operand as "E | 0" 122249423Sdim class XorOpnd { 123249423Sdim public: 124249423Sdim XorOpnd(Value *V); 125249423Sdim const XorOpnd &operator=(const XorOpnd &That); 126249423Sdim 127249423Sdim bool isInvalid() const { return SymbolicPart == 0; } 128249423Sdim bool isOrExpr() const { return isOr; } 129249423Sdim Value *getValue() const { return OrigVal; } 130249423Sdim Value *getSymbolicPart() const { return SymbolicPart; } 131249423Sdim unsigned getSymbolicRank() const { return SymbolicRank; } 132249423Sdim const APInt &getConstPart() const { return ConstPart; } 133249423Sdim 134249423Sdim void Invalidate() { SymbolicPart = OrigVal = 0; } 135249423Sdim void setSymbolicRank(unsigned R) { SymbolicRank = R; } 136249423Sdim 137249423Sdim // Sort the XorOpnd-Pointer in ascending order of symbolic-value-rank. 138249423Sdim // The purpose is twofold: 139249423Sdim // 1) Cluster together the operands sharing the same symbolic-value. 140249423Sdim // 2) Operand having smaller symbolic-value-rank is permuted earlier, which 141249423Sdim // could potentially shorten crital path, and expose more loop-invariants. 142249423Sdim // Note that values' rank are basically defined in RPO order (FIXME). 
143249423Sdim // So, if Rank(X) < Rank(Y) < Rank(Z), it means X is defined earlier 144249423Sdim // than Y which is defined earlier than Z. Permute "x | 1", "Y & 2", 145249423Sdim // "z" in the order of X-Y-Z is better than any other orders. 146251662Sdim struct PtrSortFunctor { 147251662Sdim bool operator()(XorOpnd * const &LHS, XorOpnd * const &RHS) { 148251662Sdim return LHS->getSymbolicRank() < RHS->getSymbolicRank(); 149249423Sdim } 150249423Sdim }; 151249423Sdim private: 152249423Sdim Value *OrigVal; 153249423Sdim Value *SymbolicPart; 154249423Sdim APInt ConstPart; 155249423Sdim unsigned SymbolicRank; 156249423Sdim bool isOr; 157249423Sdim }; 158239462Sdim} 159239462Sdim 160239462Sdimnamespace { 161198090Srdivacky class Reassociate : public FunctionPass { 162201360Srdivacky DenseMap<BasicBlock*, unsigned> RankMap; 163234353Sdim DenseMap<AssertingVH<Value>, unsigned> ValueRankMap; 164239462Sdim SetVector<AssertingVH<Instruction> > RedoInsts; 165193323Sed bool MadeChange; 166193323Sed public: 167193323Sed static char ID; // Pass identification, replacement for typeid 168218893Sdim Reassociate() : FunctionPass(ID) { 169218893Sdim initializeReassociatePass(*PassRegistry::getPassRegistry()); 170218893Sdim } 171193323Sed 172193323Sed bool runOnFunction(Function &F); 173193323Sed 174193323Sed virtual void getAnalysisUsage(AnalysisUsage &AU) const { 175193323Sed AU.setPreservesCFG(); 176193323Sed } 177193323Sed private: 178193323Sed void BuildRankMap(Function &F); 179193323Sed unsigned getRank(Value *V); 180239462Sdim void ReassociateExpression(BinaryOperator *I); 181239462Sdim void RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); 182201360Srdivacky Value *OptimizeExpression(BinaryOperator *I, 183201360Srdivacky SmallVectorImpl<ValueEntry> &Ops); 184201360Srdivacky Value *OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); 185249423Sdim Value *OptimizeXor(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); 186249423Sdim bool 
CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, APInt &ConstOpnd, 187249423Sdim Value *&Res); 188249423Sdim bool CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2, 189249423Sdim APInt &ConstOpnd, Value *&Res); 190239462Sdim bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops, 191239462Sdim SmallVectorImpl<Factor> &Factors); 192239462Sdim Value *buildMinimalMultiplyDAG(IRBuilder<> &Builder, 193239462Sdim SmallVectorImpl<Factor> &Factors); 194239462Sdim Value *OptimizeMul(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); 195193323Sed Value *RemoveFactorFromExpression(Value *V, Value *Factor); 196239462Sdim void EraseInst(Instruction *I); 197239462Sdim void OptimizeInst(Instruction *I); 198193323Sed }; 199193323Sed} 200193323Sed 201249423SdimXorOpnd::XorOpnd(Value *V) { 202249423Sdim assert(!isa<ConstantInt>(V) && "No ConstantInt"); 203249423Sdim OrigVal = V; 204249423Sdim Instruction *I = dyn_cast<Instruction>(V); 205249423Sdim SymbolicRank = 0; 206249423Sdim 207249423Sdim if (I && (I->getOpcode() == Instruction::Or || 208249423Sdim I->getOpcode() == Instruction::And)) { 209249423Sdim Value *V0 = I->getOperand(0); 210249423Sdim Value *V1 = I->getOperand(1); 211249423Sdim if (isa<ConstantInt>(V0)) 212249423Sdim std::swap(V0, V1); 213249423Sdim 214249423Sdim if (ConstantInt *C = dyn_cast<ConstantInt>(V1)) { 215249423Sdim ConstPart = C->getValue(); 216249423Sdim SymbolicPart = V0; 217249423Sdim isOr = (I->getOpcode() == Instruction::Or); 218249423Sdim return; 219249423Sdim } 220249423Sdim } 221249423Sdim 222249423Sdim // view the operand as "V | 0" 223249423Sdim SymbolicPart = V; 224249423Sdim ConstPart = APInt::getNullValue(V->getType()->getIntegerBitWidth()); 225249423Sdim isOr = true; 226249423Sdim} 227249423Sdim 228249423Sdimconst XorOpnd &XorOpnd::operator=(const XorOpnd &That) { 229249423Sdim OrigVal = That.OrigVal; 230249423Sdim SymbolicPart = That.SymbolicPart; 231249423Sdim ConstPart = That.ConstPart; 232249423Sdim SymbolicRank = 
That.SymbolicRank; 233249423Sdim isOr = That.isOr; 234249423Sdim return *this; 235249423Sdim} 236249423Sdim 237193323Sedchar Reassociate::ID = 0; 238212904SdimINITIALIZE_PASS(Reassociate, "reassociate", 239218893Sdim "Reassociate expressions", false, false) 240193323Sed 241193323Sed// Public interface to the Reassociate pass 242193323SedFunctionPass *llvm::createReassociatePass() { return new Reassociate(); } 243193323Sed 244239462Sdim/// isReassociableOp - Return true if V is an instruction of the specified 245239462Sdim/// opcode and if it only has one use. 246239462Sdimstatic BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) { 247239462Sdim if (V->hasOneUse() && isa<Instruction>(V) && 248239462Sdim cast<Instruction>(V)->getOpcode() == Opcode) 249239462Sdim return cast<BinaryOperator>(V); 250239462Sdim return 0; 251193323Sed} 252193323Sed 253193323Sedstatic bool isUnmovableInstruction(Instruction *I) { 254193323Sed if (I->getOpcode() == Instruction::PHI || 255239462Sdim I->getOpcode() == Instruction::LandingPad || 256193323Sed I->getOpcode() == Instruction::Alloca || 257193323Sed I->getOpcode() == Instruction::Load || 258193323Sed I->getOpcode() == Instruction::Invoke || 259193323Sed (I->getOpcode() == Instruction::Call && 260193323Sed !isa<DbgInfoIntrinsic>(I)) || 261239462Sdim I->getOpcode() == Instruction::UDiv || 262193323Sed I->getOpcode() == Instruction::SDiv || 263193323Sed I->getOpcode() == Instruction::FDiv || 264193323Sed I->getOpcode() == Instruction::URem || 265193323Sed I->getOpcode() == Instruction::SRem || 266193323Sed I->getOpcode() == Instruction::FRem) 267193323Sed return true; 268193323Sed return false; 269193323Sed} 270193323Sed 271193323Sedvoid Reassociate::BuildRankMap(Function &F) { 272193323Sed unsigned i = 2; 273193323Sed 274193323Sed // Assign distinct ranks to function arguments 275193323Sed for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) 276193323Sed ValueRankMap[&*I] = ++i; 277193323Sed 
278193323Sed ReversePostOrderTraversal<Function*> RPOT(&F); 279193323Sed for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(), 280193323Sed E = RPOT.end(); I != E; ++I) { 281193323Sed BasicBlock *BB = *I; 282193323Sed unsigned BBRank = RankMap[BB] = ++i << 16; 283193323Sed 284193323Sed // Walk the basic block, adding precomputed ranks for any instructions that 285193323Sed // we cannot move. This ensures that the ranks for these instructions are 286193323Sed // all different in the block. 287193323Sed for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) 288193323Sed if (isUnmovableInstruction(I)) 289193323Sed ValueRankMap[&*I] = ++BBRank; 290193323Sed } 291193323Sed} 292193323Sed 293193323Sedunsigned Reassociate::getRank(Value *V) { 294193323Sed Instruction *I = dyn_cast<Instruction>(V); 295201360Srdivacky if (I == 0) { 296201360Srdivacky if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument. 297201360Srdivacky return 0; // Otherwise it's a global or constant, rank 0. 298201360Srdivacky } 299193323Sed 300201360Srdivacky if (unsigned Rank = ValueRankMap[I]) 301201360Srdivacky return Rank; // Rank already known? 302193323Sed 303193323Sed // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that 304193323Sed // we can reassociate expressions for code motion! Since we do not recurse 305193323Sed // for PHI nodes, we cannot have infinite recursion here, because there 306193323Sed // cannot be loops in the value graph that do not go through PHI nodes. 307193323Sed unsigned Rank = 0, MaxRank = RankMap[I->getParent()]; 308193323Sed for (unsigned i = 0, e = I->getNumOperands(); 309193323Sed i != e && Rank != MaxRank; ++i) 310193323Sed Rank = std::max(Rank, getRank(I->getOperand(i))); 311193323Sed 312193323Sed // If this is a not or neg instruction, do not count it for rank. This 313193323Sed // assures us that X and ~X will have the same rank. 
314203954Srdivacky if (!I->getType()->isIntegerTy() || 315193323Sed (!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I))) 316193323Sed ++Rank; 317193323Sed 318202375Srdivacky //DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " 319198090Srdivacky // << Rank << "\n"); 320193323Sed 321201360Srdivacky return ValueRankMap[I] = Rank; 322193323Sed} 323193323Sed 324193323Sed/// LowerNegateToMultiply - Replace 0-X with X*-1. 325193323Sed/// 326239462Sdimstatic BinaryOperator *LowerNegateToMultiply(Instruction *Neg) { 327198090Srdivacky Constant *Cst = Constant::getAllOnesValue(Neg->getType()); 328193323Sed 329239462Sdim BinaryOperator *Res = 330239462Sdim BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg); 331239462Sdim Neg->setOperand(1, Constant::getNullValue(Neg->getType())); // Drop use of op. 332193323Sed Res->takeName(Neg); 333193323Sed Neg->replaceAllUsesWith(Res); 334221345Sdim Res->setDebugLoc(Neg->getDebugLoc()); 335193323Sed return Res; 336193323Sed} 337193323Sed 338239462Sdim/// CarmichaelShift - Returns k such that lambda(2^Bitwidth) = 2^k, where lambda 339239462Sdim/// is the Carmichael function. This means that x^(2^k) === 1 mod 2^Bitwidth for 340239462Sdim/// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic. 341239462Sdim/// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every 342239462Sdim/// even x in Bitwidth-bit arithmetic. 343239462Sdimstatic unsigned CarmichaelShift(unsigned Bitwidth) { 344239462Sdim if (Bitwidth < 3) 345239462Sdim return Bitwidth - 1; 346239462Sdim return Bitwidth - 2; 347239462Sdim} 348193323Sed 349239462Sdim/// IncorporateWeight - Add the extra weight 'RHS' to the existing weight 'LHS', 350239462Sdim/// reducing the combined weight using any special properties of the operation. 351239462Sdim/// The existing weight LHS represents the computation X op X op ... op X where 352239462Sdim/// X occurs LHS times. The combined weight represents X op X op ... 
op X with 353239462Sdim/// X occurring LHS + RHS times. If op is "Xor" for example then the combined 354239462Sdim/// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even; 355239462Sdim/// the routine returns 1 in LHS in the first case, and 0 in LHS in the second. 356239462Sdimstatic void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) { 357239462Sdim // If we were working with infinite precision arithmetic then the combined 358239462Sdim // weight would be LHS + RHS. But we are using finite precision arithmetic, 359239462Sdim // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct 360239462Sdim // for nilpotent operations and addition, but not for idempotent operations 361239462Sdim // and multiplication), so it is important to correctly reduce the combined 362239462Sdim // weight back into range if wrapping would be wrong. 363193323Sed 364239462Sdim // If RHS is zero then the weight didn't change. 365239462Sdim if (RHS.isMinValue()) 366239462Sdim return; 367239462Sdim // If LHS is zero then the combined weight is RHS. 368239462Sdim if (LHS.isMinValue()) { 369239462Sdim LHS = RHS; 370239462Sdim return; 371239462Sdim } 372239462Sdim // From this point on we know that neither LHS nor RHS is zero. 373193323Sed 374239462Sdim if (Instruction::isIdempotent(Opcode)) { 375239462Sdim // Idempotent means X op X === X, so any non-zero weight is equivalent to a 376239462Sdim // weight of 1. Keeping weights at zero or one also means that wrapping is 377239462Sdim // not a problem. 378239462Sdim assert(LHS == 1 && RHS == 1 && "Weights not reduced!"); 379239462Sdim return; // Return a weight of 1. 380239462Sdim } 381239462Sdim if (Instruction::isNilpotent(Opcode)) { 382239462Sdim // Nilpotent means X op X === 0, so reduce weights modulo 2. 383239462Sdim assert(LHS == 1 && RHS == 1 && "Weights not reduced!"); 384239462Sdim LHS = 0; // 1 + 1 === 0 modulo 2. 
385239462Sdim return; 386239462Sdim } 387239462Sdim if (Opcode == Instruction::Add) { 388239462Sdim // TODO: Reduce the weight by exploiting nsw/nuw? 389239462Sdim LHS += RHS; 390239462Sdim return; 391239462Sdim } 392193323Sed 393239462Sdim assert(Opcode == Instruction::Mul && "Unknown associative operation!"); 394239462Sdim unsigned Bitwidth = LHS.getBitWidth(); 395239462Sdim // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth 396239462Sdim // can be replaced with W-CM. That's because x^W=x^(W-CM) for every Bitwidth 397239462Sdim // bit number x, since either x is odd in which case x^CM = 1, or x is even in 398239462Sdim // which case both x^W and x^(W - CM) are zero. By subtracting off multiples 399239462Sdim // of CM like this weights can always be reduced to the range [0, CM+Bitwidth) 400239462Sdim // which by a happy accident means that they can always be represented using 401239462Sdim // Bitwidth bits. 402239462Sdim // TODO: Reduce the weight by exploiting nsw/nuw? (Could do much better than 403239462Sdim // the Carmichael number). 404239462Sdim if (Bitwidth > 3) { 405239462Sdim /// CM - The value of Carmichael's lambda function. 406239462Sdim APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth)); 407239462Sdim // Any weight W >= Threshold can be replaced with W - CM. 408239462Sdim APInt Threshold = CM + Bitwidth; 409239462Sdim assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!"); 410239462Sdim // For Bitwidth 4 or more the following sum does not overflow. 411239462Sdim LHS += RHS; 412239462Sdim while (LHS.uge(Threshold)) 413239462Sdim LHS -= CM; 414239462Sdim } else { 415239462Sdim // To avoid problems with overflow do everything the same as above but using 416239462Sdim // a larger type. 
417239462Sdim unsigned CM = 1U << CarmichaelShift(Bitwidth); 418239462Sdim unsigned Threshold = CM + Bitwidth; 419239462Sdim assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold && 420239462Sdim "Weights not reduced!"); 421239462Sdim unsigned Total = LHS.getZExtValue() + RHS.getZExtValue(); 422239462Sdim while (Total >= Threshold) 423239462Sdim Total -= CM; 424239462Sdim LHS = Total; 425239462Sdim } 426239462Sdim} 427218893Sdim 428239462Sdimtypedef std::pair<Value*, APInt> RepeatedValue; 429193323Sed 430239462Sdim/// LinearizeExprTree - Given an associative binary expression, return the leaf 431239462Sdim/// nodes in Ops along with their weights (how many times the leaf occurs). The 432239462Sdim/// original expression is the same as 433239462Sdim/// (Ops[0].first op Ops[0].first op ... Ops[0].first) <- Ops[0].second times 434239462Sdim/// op 435239462Sdim/// (Ops[1].first op Ops[1].first op ... Ops[1].first) <- Ops[1].second times 436239462Sdim/// op 437239462Sdim/// ... 438239462Sdim/// op 439239462Sdim/// (Ops[N].first op Ops[N].first op ... Ops[N].first) <- Ops[N].second times 440193323Sed/// 441243830Sdim/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct. 442193323Sed/// 443239462Sdim/// This routine may modify the function, in which case it returns 'true'. The 444239462Sdim/// changes it makes may well be destructive, changing the value computed by 'I' 445239462Sdim/// to something completely different. Thus if the routine returns 'true' then 446239462Sdim/// you MUST either replace I with a new expression computed from the Ops array, 447239462Sdim/// or use RewriteExprTree to put the values back in. 448239462Sdim/// 449239462Sdim/// A leaf node is either not a binary operation of the same kind as the root 450239462Sdim/// node 'I' (i.e. 
is not a binary operator at all, or is, but with a different 451239462Sdim/// opcode), or is the same kind of binary operator but has a use which either 452239462Sdim/// does not belong to the expression, or does belong to the expression but is 453239462Sdim/// a leaf node. Every leaf node has at least one use that is a non-leaf node 454239462Sdim/// of the expression, while for non-leaf nodes (except for the root 'I') every 455239462Sdim/// use is a non-leaf node of the expression. 456239462Sdim/// 457239462Sdim/// For example: 458239462Sdim/// expression graph node names 459239462Sdim/// 460239462Sdim/// + | I 461239462Sdim/// / \ | 462239462Sdim/// + + | A, B 463239462Sdim/// / \ / \ | 464239462Sdim/// * + * | C, D, E 465239462Sdim/// / \ / \ / \ | 466239462Sdim/// + * | F, G 467239462Sdim/// 468239462Sdim/// The leaf nodes are C, E, F and G. The Ops array will contain (maybe not in 469239462Sdim/// that order) (C, 1), (E, 1), (F, 2), (G, 2). 470239462Sdim/// 471239462Sdim/// The expression is maximal: if some instruction is a binary operator of the 472239462Sdim/// same kind as 'I', and all of its uses are non-leaf nodes of the expression, 473239462Sdim/// then the instruction also belongs to the expression, is not a leaf node of 474239462Sdim/// it, and its operands also belong to the expression (but may be leaf nodes). 475239462Sdim/// 476239462Sdim/// NOTE: This routine will set operands of non-leaf non-root nodes to undef in 477239462Sdim/// order to ensure that every non-root node in the expression has *exactly one* 478239462Sdim/// use by a non-leaf node of the expression. This destruction means that the 479239462Sdim/// caller MUST either replace 'I' with a new expression or use something like 480239462Sdim/// RewriteExprTree to put the values back in if the routine indicates that it 481239462Sdim/// made a change by returning 'true'. 
482239462Sdim/// 483239462Sdim/// In the above example either the right operand of A or the left operand of B 484239462Sdim/// will be replaced by undef. If it is B's operand then this gives: 485239462Sdim/// 486239462Sdim/// + | I 487239462Sdim/// / \ | 488239462Sdim/// + + | A, B - operand of B replaced with undef 489239462Sdim/// / \ \ | 490239462Sdim/// * + * | C, D, E 491239462Sdim/// / \ / \ / \ | 492239462Sdim/// + * | F, G 493239462Sdim/// 494239462Sdim/// Note that such undef operands can only be reached by passing through 'I'. 495239462Sdim/// For example, if you visit operands recursively starting from a leaf node 496239462Sdim/// then you will never see such an undef operand unless you get back to 'I', 497239462Sdim/// which requires passing through a phi node. 498239462Sdim/// 499239462Sdim/// Note that this routine may also mutate binary operators of the wrong type 500239462Sdim/// that have all uses inside the expression (i.e. only used by non-leaf nodes 501239462Sdim/// of the expression) if it can turn them into binary operators of the right 502239462Sdim/// type and thus make the expression bigger. 503239462Sdim 504239462Sdimstatic bool LinearizeExprTree(BinaryOperator *I, 505239462Sdim SmallVectorImpl<RepeatedValue> &Ops) { 506239462Sdim DEBUG(dbgs() << "LINEARIZE: " << *I << '\n'); 507239462Sdim unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits(); 508193323Sed unsigned Opcode = I->getOpcode(); 509239462Sdim assert(Instruction::isAssociative(Opcode) && 510239462Sdim Instruction::isCommutative(Opcode) && 511239462Sdim "Expected an associative and commutative operation!"); 512193323Sed 513239462Sdim // Visit all operands of the expression, keeping track of their weight (the 514239462Sdim // number of paths from the expression root to the operand, or if you like 515239462Sdim // the number of times that operand occurs in the linearized expression). 
516239462Sdim // For example, if I = X + A, where X = A + B, then I, X and B have weight 1 517239462Sdim // while A has weight two. 518193323Sed 519239462Sdim // Worklist of non-leaf nodes (their operands are in the expression too) along 520239462Sdim // with their weights, representing a certain number of paths to the operator. 521239462Sdim // If an operator occurs in the worklist multiple times then we found multiple 522239462Sdim // ways to get to it. 523239462Sdim SmallVector<std::pair<BinaryOperator*, APInt>, 8> Worklist; // (Op, Weight) 524239462Sdim Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1))); 525239462Sdim bool MadeChange = false; 526239462Sdim 527239462Sdim // Leaves of the expression are values that either aren't the right kind of 528239462Sdim // operation (eg: a constant, or a multiply in an add tree), or are, but have 529239462Sdim // some uses that are not inside the expression. For example, in I = X + X, 530239462Sdim // X = A + B, the value X has two uses (by I) that are in the expression. If 531239462Sdim // X has any other uses, for example in a return instruction, then we consider 532239462Sdim // X to be a leaf, and won't analyze it further. When we first visit a value, 533239462Sdim // if it has more than one use then at first we conservatively consider it to 534239462Sdim // be a leaf. Later, as the expression is explored, we may discover some more 535239462Sdim // uses of the value from inside the expression. If all uses turn out to be 536239462Sdim // from within the expression (and the value is a binary operator of the right 537239462Sdim // kind) then the value is no longer considered to be a leaf, and its operands 538239462Sdim // are explored. 539239462Sdim 540239462Sdim // Leaves - Keeps track of the set of putative leaves as well as the number of 541239462Sdim // paths to each leaf seen so far. 542239462Sdim typedef DenseMap<Value*, APInt> LeafMap; 543239462Sdim LeafMap Leaves; // Leaf -> Total weight so far. 
544239462Sdim SmallVector<Value*, 8> LeafOrder; // Ensure deterministic leaf output order. 545239462Sdim 546239462Sdim#ifndef NDEBUG 547239462Sdim SmallPtrSet<Value*, 8> Visited; // For sanity checking the iteration scheme. 548239462Sdim#endif 549239462Sdim while (!Worklist.empty()) { 550239462Sdim std::pair<BinaryOperator*, APInt> P = Worklist.pop_back_val(); 551239462Sdim I = P.first; // We examine the operands of this binary operator. 552239462Sdim 553239462Sdim for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { // Visit operands. 554239462Sdim Value *Op = I->getOperand(OpIdx); 555239462Sdim APInt Weight = P.second; // Number of paths to this operand. 556239462Sdim DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n"); 557239462Sdim assert(!Op->use_empty() && "No uses, so how did we get to it?!"); 558239462Sdim 559239462Sdim // If this is a binary operation of the right kind with only one use then 560239462Sdim // add its operands to the expression. 561239462Sdim if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { 562239462Sdim assert(Visited.insert(Op) && "Not first visit!"); 563239462Sdim DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n"); 564239462Sdim Worklist.push_back(std::make_pair(BO, Weight)); 565239462Sdim continue; 566239462Sdim } 567239462Sdim 568239462Sdim // Appears to be a leaf. Is the operand already in the set of leaves? 569239462Sdim LeafMap::iterator It = Leaves.find(Op); 570239462Sdim if (It == Leaves.end()) { 571239462Sdim // Not in the leaf map. Must be the first time we saw this operand. 572239462Sdim assert(Visited.insert(Op) && "Not first visit!"); 573239462Sdim if (!Op->hasOneUse()) { 574239462Sdim // This value has uses not accounted for by the expression, so it is 575239462Sdim // not safe to modify. Mark it as being a leaf. 
576239462Sdim DEBUG(dbgs() << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n"); 577239462Sdim LeafOrder.push_back(Op); 578239462Sdim Leaves[Op] = Weight; 579239462Sdim continue; 580239462Sdim } 581239462Sdim // No uses outside the expression, try morphing it. 582239462Sdim } else if (It != Leaves.end()) { 583239462Sdim // Already in the leaf map. 584239462Sdim assert(Visited.count(Op) && "In leaf map but not visited!"); 585239462Sdim 586239462Sdim // Update the number of paths to the leaf. 587239462Sdim IncorporateWeight(It->second, Weight, Opcode); 588239462Sdim 589239462Sdim#if 0 // TODO: Re-enable once PR13021 is fixed. 590239462Sdim // The leaf already has one use from inside the expression. As we want 591239462Sdim // exactly one such use, drop this new use of the leaf. 592239462Sdim assert(!Op->hasOneUse() && "Only one use, but we got here twice!"); 593239462Sdim I->setOperand(OpIdx, UndefValue::get(I->getType())); 594239462Sdim MadeChange = true; 595239462Sdim 596239462Sdim // If the leaf is a binary operation of the right kind and we now see 597239462Sdim // that its multiple original uses were in fact all by nodes belonging 598239462Sdim // to the expression, then no longer consider it to be a leaf and add 599239462Sdim // its operands to the expression. 600239462Sdim if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) { 601239462Sdim DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n"); 602239462Sdim Worklist.push_back(std::make_pair(BO, It->second)); 603239462Sdim Leaves.erase(It); 604239462Sdim continue; 605239462Sdim } 606239462Sdim#endif 607239462Sdim 608239462Sdim // If we still have uses that are not accounted for by the expression 609239462Sdim // then it is not safe to modify the value. 610239462Sdim if (!Op->hasOneUse()) 611239462Sdim continue; 612239462Sdim 613239462Sdim // No uses outside the expression, try morphing it. 
614239462Sdim Weight = It->second; 615239462Sdim Leaves.erase(It); // Since the value may be morphed below. 616239462Sdim } 617239462Sdim 618239462Sdim // At this point we have a value which, first of all, is not a binary 619239462Sdim // expression of the right kind, and secondly, is only used inside the 620239462Sdim // expression. This means that it can safely be modified. See if we 621239462Sdim // can usefully morph it into an expression of the right kind. 622239462Sdim assert((!isa<Instruction>(Op) || 623239462Sdim cast<Instruction>(Op)->getOpcode() != Opcode) && 624239462Sdim "Should have been handled above!"); 625239462Sdim assert(Op->hasOneUse() && "Has uses outside the expression tree!"); 626239462Sdim 627239462Sdim // If this is a multiply expression, turn any internal negations into 628239462Sdim // multiplies by -1 so they can be reassociated. 629239462Sdim BinaryOperator *BO = dyn_cast<BinaryOperator>(Op); 630239462Sdim if (Opcode == Instruction::Mul && BO && BinaryOperator::isNeg(BO)) { 631239462Sdim DEBUG(dbgs() << "MORPH LEAF: " << *Op << " (" << Weight << ") TO "); 632239462Sdim BO = LowerNegateToMultiply(BO); 633239462Sdim DEBUG(dbgs() << *BO << 'n'); 634239462Sdim Worklist.push_back(std::make_pair(BO, Weight)); 635239462Sdim MadeChange = true; 636239462Sdim continue; 637239462Sdim } 638239462Sdim 639239462Sdim // Failed to morph into an expression of the right type. This really is 640239462Sdim // a leaf. 641239462Sdim DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n"); 642239462Sdim assert(!isReassociableOp(Op, Opcode) && "Value was morphed?"); 643239462Sdim LeafOrder.push_back(Op); 644239462Sdim Leaves[Op] = Weight; 645193323Sed } 646193323Sed } 647193323Sed 648239462Sdim // The leaves, repeated according to their weights, represent the linearized 649239462Sdim // form of the expression. 
650239462Sdim for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) { 651239462Sdim Value *V = LeafOrder[i]; 652239462Sdim LeafMap::iterator It = Leaves.find(V); 653239462Sdim if (It == Leaves.end()) 654239462Sdim // Node initially thought to be a leaf wasn't. 655239462Sdim continue; 656239462Sdim assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!"); 657239462Sdim APInt Weight = It->second; 658239462Sdim if (Weight.isMinValue()) 659239462Sdim // Leaf already output or weight reduction eliminated it. 660239462Sdim continue; 661239462Sdim // Ensure the leaf is only output once. 662239462Sdim It->second = 0; 663239462Sdim Ops.push_back(std::make_pair(V, Weight)); 664193323Sed } 665193323Sed 666239462Sdim // For nilpotent operations or addition there may be no operands, for example 667239462Sdim // because the expression was "X xor X" or consisted of 2^Bitwidth additions: 668239462Sdim // in both cases the weight reduces to 0 causing the value to be skipped. 669239462Sdim if (Ops.empty()) { 670243830Sdim Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType()); 671239462Sdim assert(Identity && "Associative operation without identity!"); 672239462Sdim Ops.push_back(std::make_pair(Identity, APInt(Bitwidth, 1))); 673239462Sdim } 674193323Sed 675239462Sdim return MadeChange; 676193323Sed} 677193323Sed 678193323Sed// RewriteExprTree - Now that the operands for this expression tree are 679239462Sdim// linearized and optimized, emit them in-order. 
680193323Sedvoid Reassociate::RewriteExprTree(BinaryOperator *I, 681239462Sdim SmallVectorImpl<ValueEntry> &Ops) { 682239462Sdim assert(Ops.size() > 1 && "Single values should be used directly!"); 683218893Sdim 684243830Sdim // Since our optimizations should never increase the number of operations, the 685243830Sdim // new expression can usually be written reusing the existing binary operators 686239462Sdim // from the original expression tree, without creating any new instructions, 687239462Sdim // though the rewritten expression may have a completely different topology. 688239462Sdim // We take care to not change anything if the new expression will be the same 689239462Sdim // as the original. If more than trivial changes (like commuting operands) 690239462Sdim // were made then we are obliged to clear out any optional subclass data like 691239462Sdim // nsw flags. 692218893Sdim 693239462Sdim /// NodesToRewrite - Nodes from the original expression available for writing 694239462Sdim /// the new expression into. 695239462Sdim SmallVector<BinaryOperator*, 8> NodesToRewrite; 696239462Sdim unsigned Opcode = I->getOpcode(); 697239462Sdim BinaryOperator *Op = I; 698239462Sdim 699243830Sdim /// NotRewritable - The operands being written will be the leaves of the new 700243830Sdim /// expression and must not be used as inner nodes (via NodesToRewrite) by 701243830Sdim /// mistake. Inner nodes are always reassociable, and usually leaves are not 702243830Sdim /// (if they were they would have been incorporated into the expression and so 703243830Sdim /// would not be leaves), so most of the time there is no danger of this. But 704243830Sdim /// in rare cases a leaf may become reassociable if an optimization kills uses 705243830Sdim /// of it, or it may momentarily become reassociable during rewriting (below) 706243830Sdim /// due it being removed as an operand of one of its uses. 
Ensure that misuse 707243830Sdim /// of leaf nodes as inner nodes cannot occur by remembering all of the future 708243830Sdim /// leaves and refusing to reuse any of them as inner nodes. 709243830Sdim SmallPtrSet<Value*, 8> NotRewritable; 710243830Sdim for (unsigned i = 0, e = Ops.size(); i != e; ++i) 711243830Sdim NotRewritable.insert(Ops[i].Op); 712243830Sdim 713239462Sdim // ExpressionChanged - Non-null if the rewritten expression differs from the 714239462Sdim // original in some non-trivial way, requiring the clearing of optional flags. 715239462Sdim // Flags are cleared from the operator in ExpressionChanged up to I inclusive. 716239462Sdim BinaryOperator *ExpressionChanged = 0; 717239462Sdim for (unsigned i = 0; ; ++i) { 718239462Sdim // The last operation (which comes earliest in the IR) is special as both 719239462Sdim // operands will come from Ops, rather than just one with the other being 720239462Sdim // a subexpression. 721239462Sdim if (i+2 == Ops.size()) { 722239462Sdim Value *NewLHS = Ops[i].Op; 723239462Sdim Value *NewRHS = Ops[i+1].Op; 724239462Sdim Value *OldLHS = Op->getOperand(0); 725239462Sdim Value *OldRHS = Op->getOperand(1); 726239462Sdim 727239462Sdim if (NewLHS == OldLHS && NewRHS == OldRHS) 728239462Sdim // Nothing changed, leave it alone. 729239462Sdim break; 730239462Sdim 731239462Sdim if (NewLHS == OldRHS && NewRHS == OldLHS) { 732239462Sdim // The order of the operands was reversed. Swap them. 733239462Sdim DEBUG(dbgs() << "RA: " << *Op << '\n'); 734239462Sdim Op->swapOperands(); 735239462Sdim DEBUG(dbgs() << "TO: " << *Op << '\n'); 736239462Sdim MadeChange = true; 737239462Sdim ++NumChanged; 738239462Sdim break; 739239462Sdim } 740239462Sdim 741239462Sdim // The new operation differs non-trivially from the original. Overwrite 742239462Sdim // the old operands with the new ones. 
743239462Sdim DEBUG(dbgs() << "RA: " << *Op << '\n'); 744239462Sdim if (NewLHS != OldLHS) { 745243830Sdim BinaryOperator *BO = isReassociableOp(OldLHS, Opcode); 746243830Sdim if (BO && !NotRewritable.count(BO)) 747239462Sdim NodesToRewrite.push_back(BO); 748239462Sdim Op->setOperand(0, NewLHS); 749239462Sdim } 750239462Sdim if (NewRHS != OldRHS) { 751243830Sdim BinaryOperator *BO = isReassociableOp(OldRHS, Opcode); 752243830Sdim if (BO && !NotRewritable.count(BO)) 753239462Sdim NodesToRewrite.push_back(BO); 754239462Sdim Op->setOperand(1, NewRHS); 755239462Sdim } 756239462Sdim DEBUG(dbgs() << "TO: " << *Op << '\n'); 757239462Sdim 758239462Sdim ExpressionChanged = Op; 759193323Sed MadeChange = true; 760193323Sed ++NumChanged; 761239462Sdim 762239462Sdim break; 763193323Sed } 764193323Sed 765239462Sdim // Not the last operation. The left-hand side will be a sub-expression 766239462Sdim // while the right-hand side will be the current element of Ops. 767239462Sdim Value *NewRHS = Ops[i].Op; 768239462Sdim if (NewRHS != Op->getOperand(1)) { 769239462Sdim DEBUG(dbgs() << "RA: " << *Op << '\n'); 770239462Sdim if (NewRHS == Op->getOperand(0)) { 771239462Sdim // The new right-hand side was already present as the left operand. If 772239462Sdim // we are lucky then swapping the operands will sort out both of them. 773239462Sdim Op->swapOperands(); 774239462Sdim } else { 775239462Sdim // Overwrite with the new right-hand side. 776243830Sdim BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode); 777243830Sdim if (BO && !NotRewritable.count(BO)) 778239462Sdim NodesToRewrite.push_back(BO); 779239462Sdim Op->setOperand(1, NewRHS); 780239462Sdim ExpressionChanged = Op; 781239462Sdim } 782239462Sdim DEBUG(dbgs() << "TO: " << *Op << '\n'); 783239462Sdim MadeChange = true; 784239462Sdim ++NumChanged; 785239462Sdim } 786218893Sdim 787239462Sdim // Now deal with the left-hand side. 
If this is already an operation node 788239462Sdim // from the original expression then just rewrite the rest of the expression 789239462Sdim // into it. 790243830Sdim BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode); 791243830Sdim if (BO && !NotRewritable.count(BO)) { 792239462Sdim Op = BO; 793239462Sdim continue; 794239462Sdim } 795218893Sdim 796239462Sdim // Otherwise, grab a spare node from the original expression and use that as 797239462Sdim // the left-hand side. If there are no nodes left then the optimizers made 798239462Sdim // an expression with more nodes than the original! This usually means that 799239462Sdim // they did something stupid but it might mean that the problem was just too 800239462Sdim // hard (finding the mimimal number of multiplications needed to realize a 801239462Sdim // multiplication expression is NP-complete). Whatever the reason, smart or 802239462Sdim // stupid, create a new node if there are none left. 803239462Sdim BinaryOperator *NewOp; 804239462Sdim if (NodesToRewrite.empty()) { 805239462Sdim Constant *Undef = UndefValue::get(I->getType()); 806239462Sdim NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode), 807239462Sdim Undef, Undef, "", I); 808239462Sdim } else { 809239462Sdim NewOp = NodesToRewrite.pop_back_val(); 810239462Sdim } 811239462Sdim 812239462Sdim DEBUG(dbgs() << "RA: " << *Op << '\n'); 813239462Sdim Op->setOperand(0, NewOp); 814239462Sdim DEBUG(dbgs() << "TO: " << *Op << '\n'); 815239462Sdim ExpressionChanged = Op; 816193323Sed MadeChange = true; 817193323Sed ++NumChanged; 818239462Sdim Op = NewOp; 819193323Sed } 820193323Sed 821239462Sdim // If the expression changed non-trivially then clear out all subclass data 822239462Sdim // starting from the operator specified in ExpressionChanged, and compactify 823239462Sdim // the operators to just before the expression root to guarantee that the 824239462Sdim // expression tree is dominated by all of Ops. 
825239462Sdim if (ExpressionChanged) 826239462Sdim do { 827239462Sdim ExpressionChanged->clearSubclassOptionalData(); 828239462Sdim if (ExpressionChanged == I) 829239462Sdim break; 830239462Sdim ExpressionChanged->moveBefore(I); 831239462Sdim ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->use_begin()); 832239462Sdim } while (1); 833193323Sed 834239462Sdim // Throw away any left over nodes from the original expression. 835239462Sdim for (unsigned i = 0, e = NodesToRewrite.size(); i != e; ++i) 836239462Sdim RedoInsts.insert(NodesToRewrite[i]); 837239462Sdim} 838193323Sed 839239462Sdim/// NegateValue - Insert instructions before the instruction pointed to by BI, 840239462Sdim/// that computes the negative version of the value specified. The negative 841239462Sdim/// version of the value is returned, and BI is left pointing at the instruction 842239462Sdim/// that should be processed next by the reassociation pass. 843199481Srdivackystatic Value *NegateValue(Value *V, Instruction *BI) { 844201360Srdivacky if (Constant *C = dyn_cast<Constant>(V)) 845201360Srdivacky return ConstantExpr::getNeg(C); 846239462Sdim 847193323Sed // We are trying to expose opportunity for reassociation. One of the things 848193323Sed // that we want to do to achieve this is to push a negation as deep into an 849193323Sed // expression chain as possible, to expose the add instructions. In practice, 850193323Sed // this means that we turn this: 851193323Sed // X = -(A+12+C+D) into X = -A + -12 + -C + -D = -12 + -A + -C + -D 852193323Sed // so that later, a: Y = 12+X could get reassociated with the -12 to eliminate 853193323Sed // the constants. We assume that instcombine will clean up the mess later if 854201360Srdivacky // we introduce tons of unnecessary negation instructions. 855193323Sed // 856239462Sdim if (BinaryOperator *I = isReassociableOp(V, Instruction::Add)) { 857239462Sdim // Push the negates through the add. 
858239462Sdim I->setOperand(0, NegateValue(I->getOperand(0), BI)); 859239462Sdim I->setOperand(1, NegateValue(I->getOperand(1), BI)); 860193323Sed 861239462Sdim // We must move the add instruction here, because the neg instructions do 862239462Sdim // not dominate the old add instruction in general. By moving it, we are 863239462Sdim // assured that the neg instructions we just inserted dominate the 864239462Sdim // instruction we are about to insert after them. 865239462Sdim // 866239462Sdim I->moveBefore(BI); 867239462Sdim I->setName(I->getName()+".neg"); 868239462Sdim return I; 869239462Sdim } 870239462Sdim 871201360Srdivacky // Okay, we need to materialize a negated version of V with an instruction. 872201360Srdivacky // Scan the use lists of V to see if we have one already. 873201360Srdivacky for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ 874210299Sed User *U = *UI; 875210299Sed if (!BinaryOperator::isNeg(U)) continue; 876193323Sed 877201360Srdivacky // We found one! Now we have to make sure that the definition dominates 878201360Srdivacky // this use. We do this by moving it to the entry block (if it is a 879201360Srdivacky // non-instruction value) or right after the definition. These negates will 880201360Srdivacky // be zapped by reassociate later, so we don't need much finesse here. 881210299Sed BinaryOperator *TheNeg = cast<BinaryOperator>(U); 882202375Srdivacky 883202375Srdivacky // Verify that the negate is in this function, V might be a constant expr. 
884202375Srdivacky if (TheNeg->getParent()->getParent() != BI->getParent()->getParent()) 885202375Srdivacky continue; 886239462Sdim 887201360Srdivacky BasicBlock::iterator InsertPt; 888201360Srdivacky if (Instruction *InstInput = dyn_cast<Instruction>(V)) { 889201360Srdivacky if (InvokeInst *II = dyn_cast<InvokeInst>(InstInput)) { 890201360Srdivacky InsertPt = II->getNormalDest()->begin(); 891201360Srdivacky } else { 892201360Srdivacky InsertPt = InstInput; 893201360Srdivacky ++InsertPt; 894201360Srdivacky } 895201360Srdivacky while (isa<PHINode>(InsertPt)) ++InsertPt; 896201360Srdivacky } else { 897201360Srdivacky InsertPt = TheNeg->getParent()->getParent()->getEntryBlock().begin(); 898201360Srdivacky } 899201360Srdivacky TheNeg->moveBefore(InsertPt); 900201360Srdivacky return TheNeg; 901201360Srdivacky } 902201360Srdivacky 903193323Sed // Insert a 'neg' instruction that subtracts the value from zero to get the 904193323Sed // negation. 905193323Sed return BinaryOperator::CreateNeg(V, V->getName() + ".neg", BI); 906193323Sed} 907193323Sed 908193323Sed/// ShouldBreakUpSubtract - Return true if we should break up this subtract of 909193323Sed/// X-Y into (X + -Y). 910199481Srdivackystatic bool ShouldBreakUpSubtract(Instruction *Sub) { 911193323Sed // If this is a negation, we can't split it up! 912193323Sed if (BinaryOperator::isNeg(Sub)) 913193323Sed return false; 914239462Sdim 915193323Sed // Don't bother to break this up unless either the LHS is an associable add or 916193323Sed // subtract or if this is only used by one. 
917193323Sed if (isReassociableOp(Sub->getOperand(0), Instruction::Add) || 918193323Sed isReassociableOp(Sub->getOperand(0), Instruction::Sub)) 919193323Sed return true; 920193323Sed if (isReassociableOp(Sub->getOperand(1), Instruction::Add) || 921193323Sed isReassociableOp(Sub->getOperand(1), Instruction::Sub)) 922193323Sed return true; 923239462Sdim if (Sub->hasOneUse() && 924193323Sed (isReassociableOp(Sub->use_back(), Instruction::Add) || 925193323Sed isReassociableOp(Sub->use_back(), Instruction::Sub))) 926193323Sed return true; 927239462Sdim 928193323Sed return false; 929193323Sed} 930193323Sed 931193323Sed/// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is 932193323Sed/// only used by an add, transform this into (X+(0-Y)) to promote better 933193323Sed/// reassociation. 934239462Sdimstatic BinaryOperator *BreakUpSubtract(Instruction *Sub) { 935201360Srdivacky // Convert a subtract into an add and a neg instruction. This allows sub 936201360Srdivacky // instructions to be commuted with other add instructions. 937193323Sed // 938201360Srdivacky // Calculate the negative value of Operand 1 of the sub instruction, 939201360Srdivacky // and set it as the RHS of the add instruction we just made. 940193323Sed // 941199481Srdivacky Value *NegVal = NegateValue(Sub->getOperand(1), Sub); 942239462Sdim BinaryOperator *New = 943193323Sed BinaryOperator::CreateAdd(Sub->getOperand(0), NegVal, "", Sub); 944239462Sdim Sub->setOperand(0, Constant::getNullValue(Sub->getType())); // Drop use of op. 945239462Sdim Sub->setOperand(1, Constant::getNullValue(Sub->getType())); // Drop use of op. 946193323Sed New->takeName(Sub); 947193323Sed 948193323Sed // Everyone now refers to the add instruction. 
949193323Sed Sub->replaceAllUsesWith(New); 950221345Sdim New->setDebugLoc(Sub->getDebugLoc()); 951193323Sed 952202375Srdivacky DEBUG(dbgs() << "Negated: " << *New << '\n'); 953193323Sed return New; 954193323Sed} 955193323Sed 956193323Sed/// ConvertShiftToMul - If this is a shift of a reassociable multiply or is used 957193323Sed/// by one, change this into a multiply by a constant to assist with further 958193323Sed/// reassociation. 959239462Sdimstatic BinaryOperator *ConvertShiftToMul(Instruction *Shl) { 960239462Sdim Constant *MulCst = ConstantInt::get(Shl->getType(), 1); 961239462Sdim MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1))); 962239462Sdim 963239462Sdim BinaryOperator *Mul = 964239462Sdim BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl); 965239462Sdim Shl->setOperand(0, UndefValue::get(Shl->getType())); // Drop use of op. 966239462Sdim Mul->takeName(Shl); 967239462Sdim Shl->replaceAllUsesWith(Mul); 968239462Sdim Mul->setDebugLoc(Shl->getDebugLoc()); 969239462Sdim return Mul; 970193323Sed} 971193323Sed 972239462Sdim/// FindInOperandList - Scan backwards and forwards among values with the same 973239462Sdim/// rank as element i to see if X exists. If X does not exist, return i. This 974239462Sdim/// is useful when scanning for 'x' when we see '-x' because they both get the 975239462Sdim/// same rank. 976201360Srdivackystatic unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i, 977193323Sed Value *X) { 978193323Sed unsigned XRank = Ops[i].Rank; 979193323Sed unsigned e = Ops.size(); 980193323Sed for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j) 981193323Sed if (Ops[j].Op == X) 982193323Sed return j; 983201360Srdivacky // Scan backwards. 
984193323Sed for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j) 985193323Sed if (Ops[j].Op == X) 986193323Sed return j; 987193323Sed return i; 988193323Sed} 989193323Sed 990193323Sed/// EmitAddTreeOfValues - Emit a tree of add instructions, summing Ops together 991193323Sed/// and returning the result. Insert the tree before I. 992234982Sdimstatic Value *EmitAddTreeOfValues(Instruction *I, 993234982Sdim SmallVectorImpl<WeakVH> &Ops){ 994193323Sed if (Ops.size() == 1) return Ops.back(); 995239462Sdim 996193323Sed Value *V1 = Ops.back(); 997193323Sed Ops.pop_back(); 998193323Sed Value *V2 = EmitAddTreeOfValues(I, Ops); 999193323Sed return BinaryOperator::CreateAdd(V2, V1, "tmp", I); 1000193323Sed} 1001193323Sed 1002239462Sdim/// RemoveFactorFromExpression - If V is an expression tree that is a 1003193323Sed/// multiplication sequence, and if this sequence contains a multiply by Factor, 1004193323Sed/// remove Factor from the tree and return the new tree. 1005193323SedValue *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) { 1006193323Sed BinaryOperator *BO = isReassociableOp(V, Instruction::Mul); 1007193323Sed if (!BO) return 0; 1008239462Sdim 1009239462Sdim SmallVector<RepeatedValue, 8> Tree; 1010239462Sdim MadeChange |= LinearizeExprTree(BO, Tree); 1011201360Srdivacky SmallVector<ValueEntry, 8> Factors; 1012239462Sdim Factors.reserve(Tree.size()); 1013239462Sdim for (unsigned i = 0, e = Tree.size(); i != e; ++i) { 1014239462Sdim RepeatedValue E = Tree[i]; 1015239462Sdim Factors.append(E.second.getZExtValue(), 1016239462Sdim ValueEntry(getRank(E.first), E.first)); 1017239462Sdim } 1018193323Sed 1019193323Sed bool FoundFactor = false; 1020201360Srdivacky bool NeedsNegate = false; 1021201360Srdivacky for (unsigned i = 0, e = Factors.size(); i != e; ++i) { 1022193323Sed if (Factors[i].Op == Factor) { 1023193323Sed FoundFactor = true; 1024193323Sed Factors.erase(Factors.begin()+i); 1025193323Sed break; 1026193323Sed } 1027239462Sdim 
1028201360Srdivacky // If this is a negative version of this factor, remove it. 1029201360Srdivacky if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor)) 1030201360Srdivacky if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op)) 1031201360Srdivacky if (FC1->getValue() == -FC2->getValue()) { 1032201360Srdivacky FoundFactor = NeedsNegate = true; 1033201360Srdivacky Factors.erase(Factors.begin()+i); 1034201360Srdivacky break; 1035201360Srdivacky } 1036201360Srdivacky } 1037239462Sdim 1038193323Sed if (!FoundFactor) { 1039193323Sed // Make sure to restore the operands to the expression tree. 1040193323Sed RewriteExprTree(BO, Factors); 1041193323Sed return 0; 1042193323Sed } 1043239462Sdim 1044201360Srdivacky BasicBlock::iterator InsertPt = BO; ++InsertPt; 1045239462Sdim 1046201360Srdivacky // If this was just a single multiply, remove the multiply and return the only 1047201360Srdivacky // remaining operand. 1048201360Srdivacky if (Factors.size() == 1) { 1049239462Sdim RedoInsts.insert(BO); 1050201360Srdivacky V = Factors[0].Op; 1051201360Srdivacky } else { 1052201360Srdivacky RewriteExprTree(BO, Factors); 1053201360Srdivacky V = BO; 1054201360Srdivacky } 1055239462Sdim 1056201360Srdivacky if (NeedsNegate) 1057201360Srdivacky V = BinaryOperator::CreateNeg(V, "neg", InsertPt); 1058239462Sdim 1059201360Srdivacky return V; 1060193323Sed} 1061193323Sed 1062193323Sed/// FindSingleUseMultiplyFactors - If V is a single-use multiply, recursively 1063193323Sed/// add its operands as factors, otherwise add V to the list of factors. 1064204792Srdivacky/// 1065204792Srdivacky/// Ops is the top-level list of add operands we're trying to factor. 
1066193323Sedstatic void FindSingleUseMultiplyFactors(Value *V, 1067204792Srdivacky SmallVectorImpl<Value*> &Factors, 1068239462Sdim const SmallVectorImpl<ValueEntry> &Ops) { 1069239462Sdim BinaryOperator *BO = isReassociableOp(V, Instruction::Mul); 1070239462Sdim if (!BO) { 1071193323Sed Factors.push_back(V); 1072193323Sed return; 1073193323Sed } 1074239462Sdim 1075193323Sed // Otherwise, add the LHS and RHS to the list of factors. 1076239462Sdim FindSingleUseMultiplyFactors(BO->getOperand(1), Factors, Ops); 1077239462Sdim FindSingleUseMultiplyFactors(BO->getOperand(0), Factors, Ops); 1078193323Sed} 1079193323Sed 1080201360Srdivacky/// OptimizeAndOrXor - Optimize a series of operands to an 'and', 'or', or 'xor' 1081201360Srdivacky/// instruction. This optimizes based on identities. If it can be reduced to 1082201360Srdivacky/// a single Value, it is returned, otherwise the Ops list is mutated as 1083201360Srdivacky/// necessary. 1084201360Srdivackystatic Value *OptimizeAndOrXor(unsigned Opcode, 1085201360Srdivacky SmallVectorImpl<ValueEntry> &Ops) { 1086201360Srdivacky // Scan the operand lists looking for X and ~X pairs, along with X,X pairs. 1087201360Srdivacky // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1. 1088201360Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1089201360Srdivacky // First, check for X and ~X in the operand list. 1090201360Srdivacky assert(i < Ops.size()); 1091201360Srdivacky if (BinaryOperator::isNot(Ops[i].Op)) { // Cannot occur for ^. 
1092201360Srdivacky Value *X = BinaryOperator::getNotArgument(Ops[i].Op); 1093201360Srdivacky unsigned FoundX = FindInOperandList(Ops, i, X); 1094201360Srdivacky if (FoundX != i) { 1095201360Srdivacky if (Opcode == Instruction::And) // ...&X&~X = 0 1096201360Srdivacky return Constant::getNullValue(X->getType()); 1097239462Sdim 1098201360Srdivacky if (Opcode == Instruction::Or) // ...|X|~X = -1 1099201360Srdivacky return Constant::getAllOnesValue(X->getType()); 1100201360Srdivacky } 1101201360Srdivacky } 1102239462Sdim 1103201360Srdivacky // Next, check for duplicate pairs of values, which we assume are next to 1104201360Srdivacky // each other, due to our sorting criteria. 1105201360Srdivacky assert(i < Ops.size()); 1106201360Srdivacky if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) { 1107201360Srdivacky if (Opcode == Instruction::And || Opcode == Instruction::Or) { 1108201360Srdivacky // Drop duplicate values for And and Or. 1109201360Srdivacky Ops.erase(Ops.begin()+i); 1110201360Srdivacky --i; --e; 1111201360Srdivacky ++NumAnnihil; 1112201360Srdivacky continue; 1113201360Srdivacky } 1114239462Sdim 1115201360Srdivacky // Drop pairs of values for Xor. 1116201360Srdivacky assert(Opcode == Instruction::Xor); 1117201360Srdivacky if (e == 2) 1118201360Srdivacky return Constant::getNullValue(Ops[0].Op->getType()); 1119239462Sdim 1120201360Srdivacky // Y ^ X^X -> Y 1121201360Srdivacky Ops.erase(Ops.begin()+i, Ops.begin()+i+2); 1122201360Srdivacky i -= 1; e -= 2; 1123201360Srdivacky ++NumAnnihil; 1124201360Srdivacky } 1125201360Srdivacky } 1126201360Srdivacky return 0; 1127201360Srdivacky} 1128193323Sed 1129249423Sdim/// Helper funciton of CombineXorOpnd(). It creates a bitwise-and 1130249423Sdim/// instruction with the given two operands, and return the resulting 1131249423Sdim/// instruction. There are two special cases: 1) if the constant operand is 0, 1132249423Sdim/// it will return NULL. 
2) if the constant is ~0, the symbolic operand will 1133249423Sdim/// be returned. 1134249423Sdimstatic Value *createAndInstr(Instruction *InsertBefore, Value *Opnd, 1135249423Sdim const APInt &ConstOpnd) { 1136249423Sdim if (ConstOpnd != 0) { 1137249423Sdim if (!ConstOpnd.isAllOnesValue()) { 1138249423Sdim LLVMContext &Ctx = Opnd->getType()->getContext(); 1139249423Sdim Instruction *I; 1140249423Sdim I = BinaryOperator::CreateAnd(Opnd, ConstantInt::get(Ctx, ConstOpnd), 1141249423Sdim "and.ra", InsertBefore); 1142249423Sdim I->setDebugLoc(InsertBefore->getDebugLoc()); 1143249423Sdim return I; 1144249423Sdim } 1145249423Sdim return Opnd; 1146249423Sdim } 1147249423Sdim return 0; 1148249423Sdim} 1149249423Sdim 1150249423Sdim// Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd" 1151249423Sdim// into "R ^ C", where C would be 0, and R is a symbolic value. 1152249423Sdim// 1153249423Sdim// If it was successful, true is returned, and the "R" and "C" is returned 1154249423Sdim// via "Res" and "ConstOpnd", respectively; otherwise, false is returned, 1155249423Sdim// and both "Res" and "ConstOpnd" remain unchanged. 1156249423Sdim// 1157249423Sdimbool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, 1158249423Sdim APInt &ConstOpnd, Value *&Res) { 1159249423Sdim // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2 1160249423Sdim // = ((x | c1) ^ c1) ^ (c1 ^ c2) 1161249423Sdim // = (x & ~c1) ^ (c1 ^ c2) 1162249423Sdim // It is useful only when c1 == c2. 1163249423Sdim if (Opnd1->isOrExpr() && Opnd1->getConstPart() != 0) { 1164249423Sdim if (!Opnd1->getValue()->hasOneUse()) 1165249423Sdim return false; 1166249423Sdim 1167249423Sdim const APInt &C1 = Opnd1->getConstPart(); 1168249423Sdim if (C1 != ConstOpnd) 1169249423Sdim return false; 1170249423Sdim 1171249423Sdim Value *X = Opnd1->getSymbolicPart(); 1172249423Sdim Res = createAndInstr(I, X, ~C1); 1173249423Sdim // ConstOpnd was C2, now C1 ^ C2. 
1174249423Sdim ConstOpnd ^= C1; 1175249423Sdim 1176249423Sdim if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue())) 1177249423Sdim RedoInsts.insert(T); 1178249423Sdim return true; 1179249423Sdim } 1180249423Sdim return false; 1181249423Sdim} 1182249423Sdim 1183249423Sdim 1184249423Sdim// Helper function of OptimizeXor(). It tries to simplify 1185249423Sdim// "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a 1186249423Sdim// symbolic value. 1187249423Sdim// 1188249423Sdim// If it was successful, true is returned, and the "R" and "C" is returned 1189249423Sdim// via "Res" and "ConstOpnd", respectively (If the entire expression is 1190249423Sdim// evaluated to a constant, the Res is set to NULL); otherwise, false is 1191249423Sdim// returned, and both "Res" and "ConstOpnd" remain unchanged. 1192249423Sdimbool Reassociate::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1, XorOpnd *Opnd2, 1193249423Sdim APInt &ConstOpnd, Value *&Res) { 1194249423Sdim Value *X = Opnd1->getSymbolicPart(); 1195249423Sdim if (X != Opnd2->getSymbolicPart()) 1196249423Sdim return false; 1197249423Sdim 1198249423Sdim // This many instruction become dead.(At least "Opnd1 ^ Opnd2" will die.) 
1199249423Sdim int DeadInstNum = 1; 1200249423Sdim if (Opnd1->getValue()->hasOneUse()) 1201249423Sdim DeadInstNum++; 1202249423Sdim if (Opnd2->getValue()->hasOneUse()) 1203249423Sdim DeadInstNum++; 1204249423Sdim 1205249423Sdim // Xor-Rule 2: 1206249423Sdim // (x | c1) ^ (x & c2) 1207249423Sdim // = (x|c1) ^ (x&c2) ^ (c1 ^ c1) = ((x|c1) ^ c1) ^ (x & c2) ^ c1 1208249423Sdim // = (x & ~c1) ^ (x & c2) ^ c1 // Xor-Rule 1 1209249423Sdim // = (x & c3) ^ c1, where c3 = ~c1 ^ c2 // Xor-rule 3 1210249423Sdim // 1211249423Sdim if (Opnd1->isOrExpr() != Opnd2->isOrExpr()) { 1212249423Sdim if (Opnd2->isOrExpr()) 1213249423Sdim std::swap(Opnd1, Opnd2); 1214249423Sdim 1215251662Sdim const APInt &C1 = Opnd1->getConstPart(); 1216251662Sdim const APInt &C2 = Opnd2->getConstPart(); 1217249423Sdim APInt C3((~C1) ^ C2); 1218249423Sdim 1219249423Sdim // Do not increase code size! 1220249423Sdim if (C3 != 0 && !C3.isAllOnesValue()) { 1221249423Sdim int NewInstNum = ConstOpnd != 0 ? 1 : 2; 1222249423Sdim if (NewInstNum > DeadInstNum) 1223249423Sdim return false; 1224249423Sdim } 1225249423Sdim 1226249423Sdim Res = createAndInstr(I, X, C3); 1227249423Sdim ConstOpnd ^= C1; 1228249423Sdim 1229249423Sdim } else if (Opnd1->isOrExpr()) { 1230249423Sdim // Xor-Rule 3: (x | c1) ^ (x | c2) = (x & c3) ^ c3 where c3 = c1 ^ c2 1231249423Sdim // 1232251662Sdim const APInt &C1 = Opnd1->getConstPart(); 1233251662Sdim const APInt &C2 = Opnd2->getConstPart(); 1234249423Sdim APInt C3 = C1 ^ C2; 1235249423Sdim 1236249423Sdim // Do not increase code size 1237249423Sdim if (C3 != 0 && !C3.isAllOnesValue()) { 1238249423Sdim int NewInstNum = ConstOpnd != 0 ? 
1 : 2; 1239249423Sdim if (NewInstNum > DeadInstNum) 1240249423Sdim return false; 1241249423Sdim } 1242249423Sdim 1243249423Sdim Res = createAndInstr(I, X, C3); 1244249423Sdim ConstOpnd ^= C3; 1245249423Sdim } else { 1246249423Sdim // Xor-Rule 4: (x & c1) ^ (x & c2) = (x & (c1^c2)) 1247249423Sdim // 1248251662Sdim const APInt &C1 = Opnd1->getConstPart(); 1249251662Sdim const APInt &C2 = Opnd2->getConstPart(); 1250249423Sdim APInt C3 = C1 ^ C2; 1251249423Sdim Res = createAndInstr(I, X, C3); 1252249423Sdim } 1253249423Sdim 1254249423Sdim // Put the original operands in the Redo list; hope they will be deleted 1255249423Sdim // as dead code. 1256249423Sdim if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue())) 1257249423Sdim RedoInsts.insert(T); 1258249423Sdim if (Instruction *T = dyn_cast<Instruction>(Opnd2->getValue())) 1259249423Sdim RedoInsts.insert(T); 1260249423Sdim 1261249423Sdim return true; 1262249423Sdim} 1263249423Sdim 1264249423Sdim/// Optimize a series of operands to an 'xor' instruction. If it can be reduced 1265249423Sdim/// to a single Value, it is returned, otherwise the Ops list is mutated as 1266249423Sdim/// necessary. 
1267249423SdimValue *Reassociate::OptimizeXor(Instruction *I, 1268249423Sdim SmallVectorImpl<ValueEntry> &Ops) { 1269249423Sdim if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops)) 1270249423Sdim return V; 1271249423Sdim 1272249423Sdim if (Ops.size() == 1) 1273249423Sdim return 0; 1274249423Sdim 1275249423Sdim SmallVector<XorOpnd, 8> Opnds; 1276251662Sdim SmallVector<XorOpnd*, 8> OpndPtrs; 1277249423Sdim Type *Ty = Ops[0].Op->getType(); 1278249423Sdim APInt ConstOpnd(Ty->getIntegerBitWidth(), 0); 1279249423Sdim 1280249423Sdim // Step 1: Convert ValueEntry to XorOpnd 1281249423Sdim for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1282249423Sdim Value *V = Ops[i].Op; 1283249423Sdim if (!isa<ConstantInt>(V)) { 1284249423Sdim XorOpnd O(V); 1285249423Sdim O.setSymbolicRank(getRank(O.getSymbolicPart())); 1286249423Sdim Opnds.push_back(O); 1287249423Sdim } else 1288249423Sdim ConstOpnd ^= cast<ConstantInt>(V)->getValue(); 1289249423Sdim } 1290249423Sdim 1291251662Sdim // NOTE: From this point on, do *NOT* add/delete element to/from "Opnds". 1292251662Sdim // It would otherwise invalidate the "Opnds"'s iterator, and hence invalidate 1293251662Sdim // the "OpndPtrs" as well. For the similar reason, do not fuse this loop 1294251662Sdim // with the previous loop --- the iterator of the "Opnds" may be invalidated 1295251662Sdim // when new elements are added to the vector. 1296251662Sdim for (unsigned i = 0, e = Opnds.size(); i != e; ++i) 1297251662Sdim OpndPtrs.push_back(&Opnds[i]); 1298251662Sdim 1299249423Sdim // Step 2: Sort the Xor-Operands in a way such that the operands containing 1300249423Sdim // the same symbolic value cluster together. For instance, the input operand 1301249423Sdim // sequence ("x | 123", "y & 456", "x & 789") will be sorted into: 1302249423Sdim // ("x | 123", "x & 789", "y & 456"). 
1303251662Sdim std::sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor()); 1304249423Sdim 1305249423Sdim // Step 3: Combine adjacent operands 1306249423Sdim XorOpnd *PrevOpnd = 0; 1307249423Sdim bool Changed = false; 1308249423Sdim for (unsigned i = 0, e = Opnds.size(); i < e; i++) { 1309251662Sdim XorOpnd *CurrOpnd = OpndPtrs[i]; 1310249423Sdim // The combined value 1311249423Sdim Value *CV; 1312249423Sdim 1313249423Sdim // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd" 1314249423Sdim if (ConstOpnd != 0 && CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) { 1315249423Sdim Changed = true; 1316249423Sdim if (CV) 1317249423Sdim *CurrOpnd = XorOpnd(CV); 1318249423Sdim else { 1319249423Sdim CurrOpnd->Invalidate(); 1320249423Sdim continue; 1321249423Sdim } 1322249423Sdim } 1323249423Sdim 1324249423Sdim if (!PrevOpnd || CurrOpnd->getSymbolicPart() != PrevOpnd->getSymbolicPart()) { 1325249423Sdim PrevOpnd = CurrOpnd; 1326249423Sdim continue; 1327249423Sdim } 1328249423Sdim 1329249423Sdim // step 3.2: When previous and current operands share the same symbolic 1330249423Sdim // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd" 1331249423Sdim // 1332249423Sdim if (CombineXorOpnd(I, CurrOpnd, PrevOpnd, ConstOpnd, CV)) { 1333249423Sdim // Remove previous operand 1334249423Sdim PrevOpnd->Invalidate(); 1335249423Sdim if (CV) { 1336249423Sdim *CurrOpnd = XorOpnd(CV); 1337249423Sdim PrevOpnd = CurrOpnd; 1338249423Sdim } else { 1339249423Sdim CurrOpnd->Invalidate(); 1340249423Sdim PrevOpnd = 0; 1341249423Sdim } 1342249423Sdim Changed = true; 1343249423Sdim } 1344249423Sdim } 1345249423Sdim 1346249423Sdim // Step 4: Reassemble the Ops 1347249423Sdim if (Changed) { 1348249423Sdim Ops.clear(); 1349249423Sdim for (unsigned int i = 0, e = Opnds.size(); i < e; i++) { 1350249423Sdim XorOpnd &O = Opnds[i]; 1351249423Sdim if (O.isInvalid()) 1352249423Sdim continue; 1353249423Sdim ValueEntry VE(getRank(O.getValue()), O.getValue()); 1354249423Sdim Ops.push_back(VE); 
1355249423Sdim } 1356249423Sdim if (ConstOpnd != 0) { 1357249423Sdim Value *C = ConstantInt::get(Ty->getContext(), ConstOpnd); 1358249423Sdim ValueEntry VE(getRank(C), C); 1359249423Sdim Ops.push_back(VE); 1360249423Sdim } 1361249423Sdim int Sz = Ops.size(); 1362249423Sdim if (Sz == 1) 1363249423Sdim return Ops.back().Op; 1364249423Sdim else if (Sz == 0) { 1365249423Sdim assert(ConstOpnd == 0); 1366249423Sdim return ConstantInt::get(Ty->getContext(), ConstOpnd); 1367249423Sdim } 1368249423Sdim } 1369249423Sdim 1370249423Sdim return 0; 1371249423Sdim} 1372249423Sdim 1373201360Srdivacky/// OptimizeAdd - Optimize a series of operands to an 'add' instruction. This 1374201360Srdivacky/// optimizes based on identities. If it can be reduced to a single Value, it 1375201360Srdivacky/// is returned, otherwise the Ops list is mutated as necessary. 1376201360SrdivackyValue *Reassociate::OptimizeAdd(Instruction *I, 1377201360Srdivacky SmallVectorImpl<ValueEntry> &Ops) { 1378201360Srdivacky // Scan the operand lists looking for X and -X pairs. If we find any, we 1379201360Srdivacky // can simplify the expression. X+-X == 0. While we're at it, scan for any 1380201360Srdivacky // duplicates. We want to canonicalize Y+Y+Y+Z -> 3*Y+Z. 1381201360Srdivacky // 1382201360Srdivacky // TODO: We could handle "X + ~X" -> "-1" if we wanted, since "-X = ~X+1". 1383201360Srdivacky // 1384201360Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1385201360Srdivacky Value *TheOp = Ops[i].Op; 1386201360Srdivacky // Check to see if we've seen this operand before. If so, we factor all 1387201360Srdivacky // instances of the operand together. Due to our sorting criteria, we know 1388201360Srdivacky // that these need to be next to each other in the vector. 1389201360Srdivacky if (i+1 != Ops.size() && Ops[i+1].Op == TheOp) { 1390201360Srdivacky // Rescan the list, remove all instances of this operand from the expr. 
      // Count (and erase) the run of duplicates starting at index i; the
      // operand list is sorted, so all copies of TheOp are adjacent.
      unsigned NumFound = 0;
      do {
        Ops.erase(Ops.begin()+i);
        ++NumFound;
      } while (i != Ops.size() && Ops[i].Op == TheOp);

      DEBUG(errs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n');
      ++NumFactor;

      // Insert a new multiply: NumFound copies of TheOp become TheOp*NumFound.
      Value *Mul = ConstantInt::get(cast<IntegerType>(I->getType()), NumFound);
      Mul = BinaryOperator::CreateMul(TheOp, Mul, "factor", I);

      // Now that we have inserted a multiply, optimize it. This allows us to
      // handle cases that require multiple factoring steps, such as this:
      // (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6
      RedoInsts.insert(cast<Instruction>(Mul));

      // If every add operand was a duplicate, return the multiply.
      if (Ops.empty())
        return Mul;

      // Otherwise, we had some input that didn't have the dupe, such as
      // "A + A + B" -> "A*2 + B".  Add the new multiply to the list of
      // things being added by this operation.
      Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul));

      // Re-examine the element now occupying index i on the next iteration.
      --i;
      e = Ops.size();
      continue;
    }

    // Check for X and -X in the operand list.
    if (!BinaryOperator::isNeg(TheOp))
      continue;

    Value *X = BinaryOperator::getNegArgument(TheOp);
    unsigned FoundX = FindInOperandList(Ops, i, X);
    if (FoundX == i)
      continue;     // X itself was not found; nothing to annihilate.

    // Remove X and -X from the operand list.  If they were the only two
    // operands, the whole expression is X + -X == 0.
    if (Ops.size() == 2)
      return Constant::getNullValue(X->getType());

    Ops.erase(Ops.begin()+i);
    if (i < FoundX)
      --FoundX;
    else
      --i;   // Need to back up an extra one.
    Ops.erase(Ops.begin()+FoundX);
    ++NumAnnihil;
    --i;     // Revisit element.
    e -= 2;  // Removed two elements.
  }

  // Scan the operand list, checking to see if there are any common factors
  // between operands.  Consider something like A*A+A*B*C+D.  We would like to
  // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies.
  // To efficiently find this, we count the number of times a factor occurs
  // for any ADD operands that are MULs.
  DenseMap<Value*, unsigned> FactorOccurrences;

  // Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4)
  // where they are actually the same multiply.
  unsigned MaxOcc = 0;
  Value *MaxOccVal = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul);
    if (!BOp)
      continue;

    // Compute all of the factors of this added value.
    SmallVector<Value*, 8> Factors;
    FindSingleUseMultiplyFactors(BOp, Factors, Ops);
    assert(Factors.size() > 1 && "Bad linearize!");

    // Add one to FactorOccurrences for each unique factor in this op.
    SmallPtrSet<Value*, 8> Duplicates;
    for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
      Value *Factor = Factors[i];
      // insert() returns false when Factor was already counted for this op.
      if (!Duplicates.insert(Factor)) continue;

      unsigned Occ = ++FactorOccurrences[Factor];
      if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; }

      // If Factor is a negative constant, add the negated value as a factor
      // because we can percolate the negate out.  Watch for minint, which
      // cannot be positivified.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Factor))
        if (CI->isNegative() && !CI->isMinValue(true)) {
          Factor = ConstantInt::get(CI->getContext(), -CI->getValue());
          assert(!Duplicates.count(Factor) &&
                 "Shouldn't have two constant factors, missed a canonicalize");

          unsigned Occ = ++FactorOccurrences[Factor];
          if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; }
        }
    }
  }

  // If any factor occurred more than one time, we can pull it out.
  if (MaxOcc > 1) {
    DEBUG(errs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n');
    ++NumFactor;

    // Create a new instruction that uses the MaxOccVal twice.  If we don't do
    // this, we could otherwise run into situations where removing a factor
    // from an expression will drop a use of maxocc, and this can cause
    // RemoveFactorFromExpression on successive values to behave differently.
    Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal);
    // WeakVH entries: presumably so values stay tracked if intervening
    // RemoveFactorFromExpression calls replace/delete them — TODO confirm
    // against ValueHandle semantics.
    SmallVector<WeakVH, 4> NewMulOps;
    for (unsigned i = 0; i != Ops.size(); ++i) {
      // Only try to remove factors from expressions we're allowed to.
      BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul);
      if (!BOp)
        continue;

      if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) {
        // The factorized operand may occur several times.  Convert them all in
        // one fell swoop.
        for (unsigned j = Ops.size(); j != i;) {
          --j;
          if (Ops[j].Op == Ops[i].Op) {
            NewMulOps.push_back(V);
            Ops.erase(Ops.begin()+j);
          }
        }
        --i;
      }
    }

    // No need for extra uses anymore.
    delete DummyInst;

    unsigned NumAddedValues = NewMulOps.size();
    Value *V = EmitAddTreeOfValues(I, NewMulOps);

    // Now that we have inserted the add tree, optimize it. This allows us to
    // handle cases that require multiple factoring steps, such as this:
    // A*A*B + A*A*C   -->   A*(A*B+A*C)   -->   A*(A*(B+C))
    assert(NumAddedValues > 1 && "Each occurrence should contribute a value");
    (void)NumAddedValues;
    if (Instruction *VI = dyn_cast<Instruction>(V))
      RedoInsts.insert(VI);

    // Create the multiply.
    Instruction *V2 = BinaryOperator::CreateMul(V, MaxOccVal, "tmp", I);

    // Rerun associate on the multiply in case the inner expression turned into
    // a multiply.  We want to make sure that we keep things in canonical form.
    RedoInsts.insert(V2);

    // If every add operand included the factor (e.g. "A*B + A*C"), then the
    // entire result expression is just the multiply "A*(B+C)".
    if (Ops.empty())
      return V2;

    // Otherwise, we had some input that didn't have the factor, such as
    // "A*B + A*C + D" -> "A*(B+C) + D".  Add the new multiply to the list of
    // things being added by this operation.
    Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
  }

  return 0;
}

namespace {
  /// \brief Predicate tests whether a ValueEntry's op is in a map.
  ///
  /// NOTE(review): this predicate is not referenced anywhere in the visible
  /// remainder of this file — looks like dead code; confirm before removing.
  struct IsValueInMap {
    const DenseMap<Value *, unsigned> &Map;

    IsValueInMap(const DenseMap<Value *, unsigned> &Map) : Map(Map) {}

    bool operator()(const ValueEntry &Entry) {
      return Map.find(Entry.Op) != Map.end();
    }
  };
}

/// \brief Build up a vector of value/power pairs factoring a product.
///
/// Given a series of multiplication operands, build a vector of factors and
/// the powers each is raised to when forming the final product. Sort them in
/// the order of descending power.
///
/// (x*x) -> [(x, 2)]
/// ((x*x)*x) -> [(x, 3)]
/// ((((x*y)*x)*y)*x) -> [(x, 3), (y, 2)]
///
/// \returns Whether any factors have a power greater than one.
bool Reassociate::collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
                                         SmallVectorImpl<Factor> &Factors) {
  // FIXME: Have Ops be (ValueEntry, Multiplicity) pairs, simplifying this.
  // Compute the sum of powers of simplifiable factors.  Ops is sorted, so
  // repeated values are adjacent and can be counted in a single scan.
  unsigned FactorPowerSum = 0;
  for (unsigned Idx = 1, Size = Ops.size(); Idx < Size; ++Idx) {
    Value *Op = Ops[Idx-1].Op;

    // Count the number of occurrences of this value.
    unsigned Count = 1;
    for (; Idx < Size && Ops[Idx].Op == Op; ++Idx)
      ++Count;
    // Track for simplification all factors which occur 2 or more times.
    if (Count > 1)
      FactorPowerSum += Count;
  }

  // We can only simplify factors if the sum of the powers of our simplifiable
  // factors is 4 or higher. When that is the case, we will *always* have
  // a simplification. This is an important invariant to prevent cyclically
  // trying to simplify already minimal formations.
  if (FactorPowerSum < 4)
    return false;

  // Now gather the simplifiable factors, removing them from Ops.
  FactorPowerSum = 0;
  for (unsigned Idx = 1; Idx < Ops.size(); ++Idx) {
    Value *Op = Ops[Idx-1].Op;

    // Count the number of occurrences of this value.
    unsigned Count = 1;
    for (; Idx < Ops.size() && Ops[Idx].Op == Op; ++Idx)
      ++Count;
    if (Count == 1)
      continue;
    // Move an even number of occurrences to Factors; an odd leftover copy (if
    // any) stays behind in Ops.
    Count &= ~1U;
    // Rewind Idx to the start of the run being extracted so the erase below
    // removes exactly the counted copies.
    Idx -= Count;
    FactorPowerSum += Count;
    Factors.push_back(Factor(Op, Count));
    Ops.erase(Ops.begin()+Idx, Ops.begin()+Idx+Count);
  }

  // None of the adjustments above should have reduced the sum of factor powers
  // below our minimum of '4'.
  assert(FactorPowerSum >= 4);

  std::sort(Factors.begin(), Factors.end(), Factor::PowerDescendingSorter());
  return true;
}

/// \brief Build a tree of multiplies, computing the product of Ops.
static Value *buildMultiplyTree(IRBuilder<> &Builder,
                                SmallVectorImpl<Value*> &Ops) {
  if (Ops.size() == 1)
    return Ops.back();

  // Consume Ops back-to-front, chaining multiplies; Ops is emptied as a
  // side effect.
  Value *LHS = Ops.pop_back_val();
  do {
    LHS = Builder.CreateMul(LHS, Ops.pop_back_val());
  } while (!Ops.empty());

  return LHS;
}

/// \brief Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
///
/// Given a vector of values raised to various powers, where no two values are
/// equal and the powers are sorted in decreasing order, compute the minimal
/// DAG of multiplies to compute the final product, and return that product
/// value.
Value *Reassociate::buildMinimalMultiplyDAG(IRBuilder<> &Builder,
                                            SmallVectorImpl<Factor> &Factors) {
  assert(Factors[0].Power);
  SmallVector<Value *, 4> OuterProduct;
  // First pass: fold together runs of factors that share the same power, so
  // each distinct power is represented by a single base value.
  for (unsigned LastIdx = 0, Idx = 1, Size = Factors.size();
       Idx < Size && Factors[Idx].Power > 0; ++Idx) {
    if (Factors[Idx].Power != Factors[LastIdx].Power) {
      LastIdx = Idx;
      continue;
    }

    // We want to multiply across all the factors with the same power so that
    // we can raise them to that power as a single entity. Build a mini tree
    // for that.
    SmallVector<Value *, 4> InnerProduct;
    InnerProduct.push_back(Factors[LastIdx].Base);
    do {
      InnerProduct.push_back(Factors[Idx].Base);
      ++Idx;
    } while (Idx < Size && Factors[Idx].Power == Factors[LastIdx].Power);

    // Reset the base value of the first factor to the new expression tree.
    // We'll remove all the factors with the same power in a second pass.
    Value *M = Factors[LastIdx].Base = buildMultiplyTree(Builder, InnerProduct);
    if (Instruction *MI = dyn_cast<Instruction>(M))
      RedoInsts.insert(MI);

    LastIdx = Idx;
  }
  // Unique factors with equal powers -- we've folded them into the first one's
  // base.
  Factors.erase(std::unique(Factors.begin(), Factors.end(),
                            Factor::PowerEqual()),
                Factors.end());

  // Iteratively collect the base of each factor with an odd power into the
  // outer product, and halve each power in preparation for squaring the
  // expression.
  for (unsigned Idx = 0, Size = Factors.size(); Idx != Size; ++Idx) {
    if (Factors[Idx].Power & 1)
      OuterProduct.push_back(Factors[Idx].Base);
    Factors[Idx].Power >>= 1;
  }
  // If any power survived the halving, recurse to build the "square root" of
  // the remaining product, then square it by multiplying it in twice.
  if (Factors[0].Power) {
    Value *SquareRoot = buildMinimalMultiplyDAG(Builder, Factors);
    OuterProduct.push_back(SquareRoot);
    OuterProduct.push_back(SquareRoot);
  }
  if (OuterProduct.size() == 1)
    return OuterProduct.front();

  Value *V = buildMultiplyTree(Builder, OuterProduct);
  return V;
}

Value *Reassociate::OptimizeMul(BinaryOperator *I,
                                SmallVectorImpl<ValueEntry> &Ops) {
  // We can only optimize the multiplies when there is a chain of more than
  // three, such that a balanced tree might require fewer total multiplies.
  if (Ops.size() < 4)
    return 0;

  // Try to turn linear trees of multiplies without other uses of the
  // intermediate stages into minimal multiply DAGs with perfect sub-expression
  // re-use.
  SmallVector<Factor, 4> Factors;
  if (!collectMultiplyFactors(Ops, Factors))
    return 0; // All distinct factors, so nothing left for us to do.
  IRBuilder<> Builder(I);
  Value *V = buildMinimalMultiplyDAG(Builder, Factors);
  // collectMultiplyFactors removed the simplified factors from Ops; if it
  // consumed everything, the DAG is the whole result.
  if (Ops.empty())
    return V;

  // Otherwise splice the new sub-product back into the (sorted) operand list.
  ValueEntry NewEntry = ValueEntry(getRank(V), V);
  Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry);
  return 0;
}

Value *Reassociate::OptimizeExpression(BinaryOperator *I,
                                       SmallVectorImpl<ValueEntry> &Ops) {
  // Now that we have the linearized expression tree, try to optimize it.
  // Start by folding any constants that we found.  Constants sort to the end
  // of Ops, so pop them off the back and fold them together.
  Constant *Cst = 0;
  unsigned Opcode = I->getOpcode();
  while (!Ops.empty() && isa<Constant>(Ops.back().Op)) {
    Constant *C = cast<Constant>(Ops.pop_back_val().Op);
    Cst = Cst ? ConstantExpr::get(Opcode, C, Cst) : C;
  }
  // If there was nothing but constants then we are done.
  if (Ops.empty())
    return Cst;

  // Put the combined constant back at the end of the operand list, except if
  // there is no point.  For example, an add of 0 gets dropped here, while a
  // multiplication by zero turns the whole expression into zero.
  if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) {
    if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType()))
      return Cst;
    Ops.push_back(ValueEntry(0, Cst));
  }

  if (Ops.size() == 1) return Ops[0].Op;

  // Handle destructive annihilation due to identities between elements in the
  // argument list here.
  unsigned NumOps = Ops.size();
  switch (Opcode) {
  default: break;
  case Instruction::And:
  case Instruction::Or:
    if (Value *Result = OptimizeAndOrXor(Opcode, Ops))
      return Result;
    break;

  case Instruction::Xor:
    if (Value *Result = OptimizeXor(I, Ops))
      return Result;
    break;

  case Instruction::Add:
    if (Value *Result = OptimizeAdd(I, Ops))
      return Result;
    break;

  case Instruction::Mul:
    if (Value *Result = OptimizeMul(I, Ops))
      return Result;
    break;
  }

  // If an optimization above shrank the operand list, iterate (via recursion)
  // until a fixed point is reached.
  if (Ops.size() != NumOps)
    return OptimizeExpression(I, Ops);
  return 0;
}

/// EraseInst - Zap the given instruction, adding interesting operands to the
/// work list.
void Reassociate::EraseInst(Instruction *I) {
  assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
  // Snapshot the operands before erasing, so we can revisit them afterwards.
  SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
  // Erase the dead instruction.
  ValueRankMap.erase(I);
  RedoInsts.remove(I);
  I->eraseFromParent();
  // Optimize its operands.
  SmallPtrSet<Instruction *, 8> Visited; // Detect self-referential nodes.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (Instruction *Op = dyn_cast<Instruction>(Ops[i])) {
      // If this is a node in an expression tree, climb to the expression root
      // and add that since that's where optimization actually happens.
      unsigned Opcode = Op->getOpcode();
      // Follow single-use chains of the same opcode upward; Visited guards
      // against cycles in self-referential (unreachable) code.
      while (Op->hasOneUse() && Op->use_back()->getOpcode() == Opcode &&
             Visited.insert(Op))
        Op = Op->use_back();
      RedoInsts.insert(Op);
    }
}

/// OptimizeInst - Inspect and optimize the given instruction. Note that erasing
/// instructions is not allowed.
void Reassociate::OptimizeInst(Instruction *I) {
  // Only consider operations that we understand.
  if (!isa<BinaryOperator>(I))
    return;

  if (I->getOpcode() == Instruction::Shl &&
      isa<ConstantInt>(I->getOperand(1)))
    // If an operand of this shift is a reassociable multiply, or if the shift
    // is used by a reassociable multiply or add, turn into a multiply.
    if (isReassociableOp(I->getOperand(0), Instruction::Mul) ||
        (I->hasOneUse() &&
         (isReassociableOp(I->use_back(), Instruction::Mul) ||
          isReassociableOp(I->use_back(), Instruction::Add)))) {
      Instruction *NI = ConvertShiftToMul(I);
      RedoInsts.insert(I);
      MadeChange = true;
      // Continue processing the replacement instruction below.
      I = NI;
    }

  // Floating point binary operators are not associative, but we can still
  // commute (some) of them, to canonicalize the order of their operands.
  // This can potentially expose more CSE opportunities, and makes writing
  // other transformations simpler.
  if ((I->getType()->isFloatingPointTy() || I->getType()->isVectorTy())) {
    // FAdd and FMul can be commuted.
    if (I->getOpcode() != Instruction::FMul &&
        I->getOpcode() != Instruction::FAdd)
      return;

    Value *LHS = I->getOperand(0);
    Value *RHS = I->getOperand(1);
    unsigned LHSRank = getRank(LHS);
    unsigned RHSRank = getRank(RHS);

    // Sort the operands by rank.
    if (RHSRank < LHSRank) {
      I->setOperand(0, RHS);
      I->setOperand(1, LHS);
    }

    return;
  }

  // Do not reassociate boolean (i1) expressions.  We want to preserve the
  // original order of evaluation for short-circuited comparisons that
  // SimplifyCFG has folded to AND/OR expressions.  If the expression
  // is not further optimized, it is likely to be transformed back to a
  // short-circuited form for code gen, and the source order may have been
  // optimized for the most likely conditions.
  if (I->getType()->isIntegerTy(1))
    return;

  // If this is a subtract instruction which is not already in negate form,
  // see if we can convert it to X+-Y.
  if (I->getOpcode() == Instruction::Sub) {
    if (ShouldBreakUpSubtract(I)) {
      Instruction *NI = BreakUpSubtract(I);
      RedoInsts.insert(I);
      MadeChange = true;
      I = NI;
    } else if (BinaryOperator::isNeg(I)) {
      // Otherwise, this is a negation.  See if the operand is a multiply tree
      // and if this is not an inner node of a multiply tree.
      if (isReassociableOp(I->getOperand(1), Instruction::Mul) &&
          (!I->hasOneUse() ||
           !isReassociableOp(I->use_back(), Instruction::Mul))) {
        Instruction *NI = LowerNegateToMultiply(I);
        RedoInsts.insert(I);
        MadeChange = true;
        I = NI;
      }
    }
  }

  // If this instruction is an associative binary operator, process it.
  if (!I->isAssociative()) return;
  BinaryOperator *BO = cast<BinaryOperator>(I);

  // If this is an interior node of a reassociable tree, ignore it until we
  // get to the root of the tree, to avoid N^2 analysis.
  unsigned Opcode = BO->getOpcode();
  if (BO->hasOneUse() && BO->use_back()->getOpcode() == Opcode)
    return;

  // If this is an add tree that is used by a sub instruction, ignore it
  // until we process the subtract.
  if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add &&
      cast<Instruction>(BO->use_back())->getOpcode() == Instruction::Sub)
    return;

  ReassociateExpression(BO);
}

void Reassociate::ReassociateExpression(BinaryOperator *I) {

  // First, walk the expression tree, linearizing the tree, collecting the
  // operand information.
  SmallVector<RepeatedValue, 8> Tree;
  MadeChange |= LinearizeExprTree(I, Tree);
  // Expand each (value, multiplicity) pair from the tree into that many
  // ranked ValueEntry operands.
  SmallVector<ValueEntry, 8> Ops;
  Ops.reserve(Tree.size());
  for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
    RepeatedValue E = Tree[i];
    Ops.append(E.second.getZExtValue(),
               ValueEntry(getRank(E.first), E.first));
  }

  DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');

  // Now that we have linearized the tree to a list and have gathered all of
  // the operands and their ranks, sort the operands by their rank.  Use a
  // stable_sort so that values with equal ranks will have their relative
  // positions maintained (and so the compiler is deterministic).  Note that
  // this sorts so that the highest ranking values end up at the beginning of
  // the vector.
  std::stable_sort(Ops.begin(), Ops.end());

  // OptimizeExpression - Now that we have the expression tree in a convenient
  // sorted form, optimize it globally if possible.
  if (Value *V = OptimizeExpression(I, Ops)) {
    if (V == I)
      // Self-referential expression in unreachable code.
      return;
    // This expression tree simplified to something that isn't a tree,
    // eliminate it.
    DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n');
    I->replaceAllUsesWith(V);
    if (Instruction *VI = dyn_cast<Instruction>(V))
      VI->setDebugLoc(I->getDebugLoc());
    RedoInsts.insert(I);
    ++NumAnnihil;
    return;
  }

  // We want to sink immediates as deeply as possible except in the case where
  // this is a multiply tree used only by an add, and the immediate is a -1.
  // In this case we reassociate to put the negation on the outside so that we
  // can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
  if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
      cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(Ops.back().Op) &&
      cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
    ValueEntry Tmp = Ops.pop_back_val();
    Ops.insert(Ops.begin(), Tmp);
  }

  DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n');

  if (Ops.size() == 1) {
    if (Ops[0].Op == I)
      // Self-referential expression in unreachable code.
      return;

    // This expression tree simplified to something that isn't a tree,
    // eliminate it.
    I->replaceAllUsesWith(Ops[0].Op);
    if (Instruction *OI = dyn_cast<Instruction>(Ops[0].Op))
      OI->setDebugLoc(I->getDebugLoc());
    RedoInsts.insert(I);
    return;
  }

  // Now that we ordered and optimized the expressions, splat them back into
  // the expression tree, removing any unneeded nodes.
  RewriteExprTree(I, Ops);
}

bool Reassociate::runOnFunction(Function &F) {
  // Calculate the rank map for F.
  BuildRankMap(F);

  MadeChange = false;
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Optimize every instruction in the basic block.
    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE; )
      if (isInstructionTriviallyDead(II)) {
        // Advance the iterator before EraseInst invalidates it.
        EraseInst(II++);
      } else {
        OptimizeInst(II);
        assert(II->getParent() == BI && "Moved to a different block!");
        ++II;
      }

    // If this produced extra instructions to optimize, handle them now.
    while (!RedoInsts.empty()) {
      Instruction *I = RedoInsts.pop_back_val();
      if (isInstructionTriviallyDead(I))
        EraseInst(I);
      else
        OptimizeInst(I);
    }
  }

  // We are done with the rank map.
  RankMap.clear();
  ValueRankMap.clear();

  return MadeChange;
}