//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ...
//     ... = a[x + i][y + j];
//     ...
//   }
// }
//
// will probably be unrolled to:
//
// gep %a, 0, %x, %y; load
// gep %a, 0, %x, %y + 1; load
// gep %a, 0, %x + 1, %y; load
// gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
// significant slowdown in targets with limited addressing modes. For instance,
// because the PTX target does not support the reg+reg addressing mode, the
// NVPTX backend emits PTX code that literally computes the pointer address of
// each GEP, wasting tons of registers. It emits the following PTX for the
// first load and similar PTX for other loads.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6];
//
// To reduce the register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so we can compute each pointer
// address by adding a simple offset to the common part, saving many registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
// base = gep a, 0, x, y
// load base
// load base + 1  * sizeof(float)
// load base + 32 * sizeof(float)
// load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetics into the loads. For
// example, the NVPTX backend can easily fold the pointer arithmetics into the
// ld.global.f32 instructions, and the resultant PTX uses much fewer registers.
//
// mov.u32         %r1, %x;
// mov.u32         %r2, %y;
// mul.wide.u32    %rl2, %r1, 128;
// mov.u64         %rl3, a;
// add.s64         %rl4, %rl3, %rl2;
// mul.wide.u32    %rl5, %r2, 4;
// add.s64         %rl6, %rl4, %rl5;
// ld.global.f32   %f1, [%rl6]; // so far the same as unoptimized PTX
// ld.global.f32   %f2, [%rl6+4]; // much better
// ld.global.f32   %f3, [%rl6+128]; // much better
// ld.global.f32   %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// Such transformation can have the following benefits:
// (1) It can always extract constants in the indices of structure type.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
// E.g. The following GEPs have multiple indices:
//  BB1:
//    %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
//    load %p
//    ...
//  BB2:
//    %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j2, i32 2
//    load %p2
//    ...
//
// We cannot do CSE on the common part related to index "i64 %i". Lowering
// GEPs can achieve such goals.
// If the target does not use alias analysis in codegen, this pass will
// lower a GEP with multiple indices into arithmetic operations:
//  BB1:
//    %1 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = add i64 %1, %2                          ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = add i64 %3, %4
//    %6 = add i64 %5, struct_field_3              ; Constant offset
//    %p = inttoptr i64 %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = add i64 %7, %8                          ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = add i64 %9, %10
//    %12 = add i64 %11, struct_field_2            ; Constant offset
//    %p2 = inttoptr i64 %12 to i32*
//    load %p2
//    ...
//
// If the target uses alias analysis in codegen, this pass will lower a GEP
// with multiple indices into multiple GEPs with a single index:
//  BB1:
//    %1 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = getelementptr i8* %1, i64 %2            ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = getelementptr i8* %3, i64 %4
//    %6 = getelementptr i8* %5, struct_field_3    ; Constant offset
//    %p = bitcast i8* %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = getelementptr i8* %7, i64 %8            ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = getelementptr i8* %9, i64 %10
//    %12 = getelementptr i8* %11, struct_field_2  ; Constant offset
//    %p2 = bitcast i8* %12 to i32*
//    load %p2
//    ...
//
// Lowering GEPs can also benefit other passes such as LICM and CGP.
// LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP with multiple
// indices if any of those indices is loop variant. If we lower such a GEP into
// invariant parts and variant parts, LICM can hoist/sink the invariant parts.
// CGP (CodeGen Prepare) tries to sink address calculations that match the
// target's addressing modes. A GEP with multiple indices may not match and will
// not be sunk. If we lower such a GEP into smaller parts, CGP may sink some of
// them, so we end up with a better addressing mode.
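//
// For example (a sketch; assume %p is loop invariant while %i changes every
// iteration), the GEP
//
//   %addr = getelementptr [10 x i32]* %p, i64 0, i64 %i
//
// cannot be hoisted as a whole, but after lowering to arithmetics
//
//   %base = ptrtoint [10 x i32]* %p to i64       ; loop invariant
//   %off  = mul i64 %i, 4
//   %sum  = add i64 %base, %off
//   %addr = inttoptr i64 %sum to i32*
//
// LICM can hoist the loop-invariant %base computation out of the loop.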
//
//===----------------------------------------------------------------------===//

159#include "llvm/Analysis/ScalarEvolution.h"
160#include "llvm/Analysis/LoopInfo.h"
161#include "llvm/Analysis/MemoryBuiltins.h"
162#include "llvm/Analysis/TargetLibraryInfo.h"
163#include "llvm/Analysis/TargetTransformInfo.h"
164#include "llvm/Analysis/ValueTracking.h"
165#include "llvm/IR/Constants.h"
166#include "llvm/IR/DataLayout.h"
167#include "llvm/IR/Dominators.h"
168#include "llvm/IR/Instructions.h"
169#include "llvm/IR/LLVMContext.h"
170#include "llvm/IR/Module.h"
171#include "llvm/IR/PatternMatch.h"
172#include "llvm/IR/Operator.h"
173#include "llvm/Support/CommandLine.h"
174#include "llvm/Support/raw_ostream.h"
175#include "llvm/Transforms/Scalar.h"
176#include "llvm/Transforms/Utils/Local.h"
177#include "llvm/Target/TargetMachine.h"
178#include "llvm/Target/TargetSubtargetInfo.h"
179#include "llvm/IR/IRBuilder.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);
// Setting this flag may emit false positives when the input module already
// contains dead instructions. Therefore, we set it only in unit tests that are
// free of dead code.
static cl::opt<bool>
    VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
                     cl::desc("Verify this pass produces no dead code"),
                     cl::Hidden);

namespace {

/// \brief A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple addition
/// of something and a constant integer which can be trivially split. For
/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index to (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a +
/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), however in this case,
/// -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15).
class ConstantOffsetExtractor {
public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// new index representing the remainder (equal to the original index minus
  /// the constant offset), or nullptr if we cannot extract a constant offset.
  /// \p Idx The given GEP index
  /// \p GEP The given GEP
  /// \p UserChainTail Outputs the tail of UserChain so that we can
  ///                  garbage-collect unused instructions in UserChain.
  static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
                        User *&UserChainTail, const DominatorTree *DT);
  /// Looks for a constant offset from the given GEP index without extracting
  /// it. It returns the numeric value of the constant offset (0 if no constant
  /// offset is found). The meanings of the arguments are the same as in
  /// Extract.
  static int64_t Find(Value *Idx, GetElementPtrInst *GEP,
                      const DominatorTree *DT);

private:
  ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
      : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
  }
  /// Searches the expression that computes V for a non-zero constant C s.t.
  /// V can be reassociated into the form V' + C. If the search is successful,
  /// returns C and updates UserChain as a def-use chain from C to V;
  /// otherwise, UserChain is empty.
  ///
  /// \p V            The given expression
  /// \p SignExtended Whether V will be sign-extended in the computation of the
  ///                 GEP index
  /// \p ZeroExtended Whether V will be zero-extended in the computation of the
  ///                 GEP index
  /// \p NonNegative  Whether V is guaranteed to be non-negative. For example,
  ///                 an index of an inbounds GEP is guaranteed to be
  ///                 non-negative. Leveraging this, we can better split
  ///                 inbounds GEPs.
  APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);
  /// A helper function to look into both operands of a binary operator.
  APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
                            bool ZeroExtended);
  /// After finding the constant offset C from the GEP index I, we build a new
  /// index I' s.t. I' + C = I. This function builds and returns the new
  /// index I' according to UserChain produced by function "find".
  ///
  /// The building conceptually takes two steps:
  /// 1) iteratively distribute s/zext towards the leaves of the expression tree
  /// that computes I
  /// 2) reassociate the expression tree to the form I' + C.
  ///
  /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
  /// sext to a, b and 5 so that we have
  ///   sext(a) + (sext(b) + 5).
  /// Then, we reassociate it to
  ///   (sext(a) + sext(b)) + 5.
  /// Given this form, we know I' is sext(a) + sext(b).
  Value *rebuildWithoutConstOffset();
  /// After the first step of rebuilding the GEP index without the constant
  /// offset, distribute s/zext to the operands of all operators in UserChain.
  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
  /// zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// The function also updates UserChain to point to new subexpressions after
  /// distributing s/zext. e.g., the old UserChain of the above example is
  /// 5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
  /// and the new UserChain is
  /// zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
  ///   zext(sext(a)) + (zext(sext(b)) + zext(sext(5)))
  ///
  /// \p ChainIndex The index to UserChain. ChainIndex is initially
  ///               UserChain.size() - 1, and is decremented during
  ///               the recursion.
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);
  /// Reassociates the GEP index to the form I' + C and returns I'.
  Value *removeConstOffset(unsigned ChainIndex);
  /// A helper function to apply ExtInsts, a list of s/zext, to value V.
  /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
  /// returns "sext i32 (zext i16 V to i32) to i64".
  Value *applyExts(Value *V);

  /// A helper function that returns whether we can trace into the operands
  /// of binary operator BO for a constant offset.
  ///
  /// \p SignExtended Whether BO is surrounded by sext
  /// \p ZeroExtended Whether BO is surrounded by zext
  /// \p NonNegative Whether BO is known to be non-negative, e.g., an in-bound
  ///                array index.
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);

  /// The path from the constant offset to the old GEP index. e.g., if the GEP
  /// index is "a * b + (c + 5)". After running function find, UserChain[0] will
  /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
  /// UserChain[2] will be the entire expression "a * b + (c + 5)".
  ///
  /// This path helps to rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;
  /// A data structure used in rebuildWithoutConstOffset. Contains all
  /// sext/zext instructions along UserChain.
  SmallVector<CastInst *, 16> ExtInsts;
  Instruction *IP;  /// Insertion position of cloned instructions.
  const DataLayout &DL;
  const DominatorTree *DT;
};

/// \brief A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
public:
  static char ID;
  SeparateConstOffsetFromGEP(const TargetMachine *TM = nullptr,
                             bool LowerGEP = false)
      : FunctionPass(ID), DL(nullptr), DT(nullptr), TM(TM), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }
  bool runOnFunction(Function &F) override;

private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);
  /// Lower a GEP with multiple indices into multiple GEPs with a single index.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of GEPs with a single index and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic                  The variadic part of the original GEP.
  /// \p AccumulativeByteOffset    The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);
  /// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of arithmetic operations and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic                  The variadic part of the original GEP.
  /// \p AccumulativeByteOffset    The constant offset.
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);
  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it looks into the indices of both sequential and struct
  /// types; otherwise, it only looks into sequential indices. The output
  /// NeedsExtraction indicates whether we successfully found a non-zero
  /// constant offset.
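  ///
  /// For example (a sketch, assuming i32 is 4 bytes), for
  ///   gep [8 x i32]* %p, i64 %i, i64 %j
  /// where %j computes %j' + 3, this function returns 3 * 4 = 12 and sets
  /// NeedsExtraction.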
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);
  /// Canonicalize array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
  bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);
  /// Optimize sext(a)+sext(b) to sext(a+b) when a+b can't sign overflow.
  /// SeparateConstOffsetFromGEP distributes a sext to leaves before extracting
  /// the constant offset. After extraction, it becomes desirable to reunite the
  /// distributed sexts. For example,
  ///
  ///                              &a[sext(i +nsw (j +nsw 5))]
  ///   => distribute              &a[sext(i) +nsw (sext(j) +nsw 5)]
  ///   => constant extraction     &a[sext(i) + sext(j)] + 5
  ///   => reunion                 &a[sext(i +nsw j)] + 5
  bool reuniteExts(Function &F);
  /// A helper that reunites sexts in an instruction.
  bool reuniteExts(Instruction *I);
  /// Find the closest dominator of <Dominatee> that is equivalent to <Key>.
  Instruction *findClosestMatchingDominator(const SCEV *Key,
                                            Instruction *Dominatee);
  /// Verify F is free of dead code.
  void verifyNoDeadCode(Function &F);

  bool hasMoreThanOneUseInLoop(Value *V, Loop *L);
  // Swap the index operands of the two GEPs.
  void swapGEPOperand(GetElementPtrInst *First, GetElementPtrInst *Second);
  // Check if it is safe to swap the operands of the two GEPs.
  bool isLegalToSwapOperand(GetElementPtrInst *First, GetElementPtrInst *Second,
                            Loop *CurLoop);

  const DataLayout *DL;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const TargetMachine *TM;

  LoopInfo *LI;
  TargetLibraryInfo *TLI;
  /// Whether to lower a GEP with multiple indices into arithmetic operations or
  /// multiple GEPs with a single index.
  bool LowerGEP;
  DenseMap<const SCEV *, SmallVector<Instruction *, 2>> DominatingExprs;
};
}  // anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;
INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *
llvm::createSeparateConstOffsetFromGEPPass(const TargetMachine *TM,
                                           bool LowerGEP) {
  return new SeparateConstOffsetFromGEP(TM, LowerGEP);
}
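
// Example usage (a sketch, not part of this file): a target whose addressing
// modes benefit from fully lowered GEPs might schedule this pass in its IR
// pipeline, e.g.
//   addPass(createSeparateConstOffsetFromGEPPass(TM, /*LowerGEP=*/true));
// typically followed by a run of EarlyCSE or GVN to clean up the exposed
// common subexpressions.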

bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                            bool ZeroExtended,
                                            BinaryOperator *BO,
                                            bool NonNegative) {
  // We only consider ADD, SUB and OR, because a non-zero constant found in
  // expressions composed of these operations can be easily hoisted as a
  // constant offset by reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or) {
    return false;
  }

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
  // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
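  // For example, if LHS is known to be a multiple of 4, then LHS | 3 equals
  // LHS + 3 because the set bits never overlap, whereas LHS | 4 may not equal
  // LHS + 4.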
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
    return false;

  // In addition, tracing into BO requires that its surrounding s/zext (if
  // any) is distributable to both operands.
  //
  // Suppose BO = A op B.
  //  SignExtended | ZeroExtended | Distributable?
  // --------------+--------------+----------------------------------
  //       0       |      0       | true because no s/zext exists
  //       0       |      1       | zext(BO) == zext(A) op zext(B)
  //       1       |      0       | sext(BO) == sext(A) op sext(B)
  //       1       |      1       | zext(sext(BO)) ==
  //               |              |     zext(sext(A)) op zext(sext(B))
  if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
    // If a + b >= 0 and (a >= 0 or b >= 0), then
    //   sext(a + b) = sext(a) + sext(b)
    // even if the addition is not marked nsw.
    //
    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
    // index if the constant offset is non-negative.
    //
    // Verified in @sext_add in split-gep.ll.
    if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
      if (!ConstLHS->isNegative())
        return true;
    }
    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      if (!ConstRHS->isNegative())
        return true;
    }
  }

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    if (SignExtended && !BO->hasNoSignedWrap())
      return false;
    if (ZeroExtended && !BO->hasNoUnsignedWrap())
      return false;
  }

  return true;
}

APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // BO being non-negative does not shed light on whether its operands are
  // non-negative. Clear the NonNegative flag here.
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /* NonNegative */ false);
  // If we found a constant offset in the left operand, stop and return that.
  // This shortcut might cause us to miss opportunities of combining the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine,
  // given this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;
  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /* NonNegative */ false);
  // If BO is a sub operator, negate the constant offset found in the right
  // operand.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;
  return ConstantOffset;
}

APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // TODO(jingyue): We could trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // We cannot do much with Values that are not a User, such as an Argument.
  User *U = dyn_cast<User>(V);
  if (U == nullptr) return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Hooray, we found it!
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // As an optimization, we can clear the SignExtended flag because
    // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
    //
    // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
    ConstantOffset =
        find(U->getOperand(0), /* SignExtended */ false,
             /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
  }

  // If we found a non-zero constant offset, add it to the path for
  // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
  // help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in the use-def order. Therefore, we apply them to V
  // in the reversed order.
  for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // If Current is a constant, apply s/zext using ConstantExpr::getCast.
      // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
      Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
    } else {
      Instruction *Ext = (*I)->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}

Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (used to be s/zext) from UserChain.
  unsigned NewSize = 0;
  for (auto I = UserChain.begin(), E = UserChain.end(); I != E; ++I) {
    if (*I != nullptr) {
      UserChain[NewSize] = *I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}

Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert((isa<SExtInst>(Cast) || isa<ZExtInst>(Cast)) &&
           "We only traced into two types of CastInst: sext and zext");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr;
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Function find only traces into BinaryOperator and CastInst.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo = which operand of BO is UserChain[ChainIndex - 1]
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  } else {
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  }
  return UserChain[ChainIndex] = NewBO;
}

Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  assert(BO->getNumUses() <= 1 &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so no one should be used more than "
         "once");

  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is 0 and not the LHS of a sub, we can simplify the
  // sub-expression to be just TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }

  BinaryOperator::BinaryOps NewOp = BO->getOpcode();
  if (BO->getOpcode() == Instruction::Or) {
664    // Rebuild "or" as "add", because "or" may be invalid for the new
665    // epxression.
    //
    // For instance, given
    //   a | (b + 5) where a and b + 5 have no common bits,
    // we can extract 5 as the constant offset.
    //
    // However, reusing the "or" in the new index would give us
    //   (a | b) + 5
    // which does not equal a | (b + 5).
    //
    // Replacing the "or" with "add" is fine, because
    //   a | (b + 5) = a + (b + 5) = (a + b) + 5
    NewOp = Instruction::Add;
  }

  BinaryOperator *NewBO;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
  } else {
    NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
  }
  NewBO->takeName(BO);
  return NewBO;
}

Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
                                        User *&UserChainTail,
                                        const DominatorTree *DT) {
  ConstantOffsetExtractor Extractor(GEP, DT);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
                     GEP->isInBounds());
  if (ConstantOffset == 0) {
    UserChainTail = nullptr;
    return nullptr;
  }
  // Separates the constant offset from the GEP index.
  Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
  UserChainTail = Extractor.UserChain.back();
  return IdxWithoutConstOffset;
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
                                      const DominatorTree *DT) {
  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be
  // non-negative.
  return ConstantOffsetExtractor(GEP, DT)
      .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
            GEP->isInBounds())
      .getSExtValue();
}

bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices which must be i32.
    if (isa<SequentialType>(*GTI)) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}

int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices.  We accumulate the extracted
        // constant offset to a byte offset, and later offset the remainder of
        // the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = cast<StructType>(*GTI);
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}

void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  Loop *L = LI->getLoopFor(Variadic->getParent());
  // The base is a candidate for swapping only if it is loop invariant and
  // used no more than once in the loop.
  bool isSwapCandidate =
      L && L->isLoopInvariant(ResultPtr) &&
      !hasMoreThanOneUseInLoop(ResultPtr, L);
  Value *FirstResult = nullptr;

  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr =
          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
      if (FirstResult == nullptr)
        FirstResult = ResultPtr;
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr =
        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
  } else
    isSwapCandidate = false;

  // If we created a GEP with a constant index, and the base is loop invariant,
  // then we swap the first GEP with it, so LICM can later move the constant
  // GEP out of the loop.
  GetElementPtrInst *FirstGEP =
      dyn_cast_or_null<GetElementPtrInst>(FirstResult);
  GetElementPtrInst *SecondGEP = dyn_cast<GetElementPtrInst>(ResultPtr);
  if (isSwapCandidate && isLegalToSwapOperand(FirstGEP, SecondGEP, L))
    swapGEPOperand(FirstGEP, SecondGEP);

  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetics for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;
  // If LowerGEP is disabled, before really splitting the GEP, check whether the
  // backend supports the addressing mode we are about to produce. If not, this
  // splitting probably won't be beneficial.
  // If LowerGEP is enabled, even when the extracted constant offset cannot
  // match the addressing mode, we can still do optimizations on the other
  // lowered parts of the variable indices. Therefore, we don't check for
  // addressing modes in that case.
  if (!LowerGEP) {
    TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *GEP->getParent()->getParent());
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0,
                                   AddrSpace)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index. The resultant GEP
  // computes the variadic base.
  // Notice that we don't remove struct field indices here. If LowerGEP is
  // disabled, a structure index is not accumulated and we still use the old
  // one. If LowerGEP is enabled, a structure index is accumulated in the
  // constant offset. LowerToSingleIndexGEPs or lowerToArithmetics will later
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (isa<SequentialType>(*GTI)) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *OldIdx = GEP->getOperand(I);
      User *UserChainTail;
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
      if (NewIdx != nullptr) {
        // Switches to the index with the constant offset removed.
        GEP->setOperand(I, NewIdx);
        // After switching to the new index, we can garbage-collect UserChain
        // and the old index if they are not used.
        RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
        RecursivelyDeleteTriviallyDeadInstructions(OldIdx);
      }
    }
  }

  // Clear the inbounds attribute because the new index may be off-bound.
  // e.g.,
  //
  //   b     = add i64 a, 5
  //   addr  = gep inbounds float, float* p, i64 b
  //
  // is transformed to:
  //
  //   addr2 = gep float, float* p, i64 a ; inbounds removed
  //   addr  = gep inbounds float, float* addr2, i64 5
  //
  // If a is -4, although the old index b is in bounds, the new index a is
  // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
  // inbounds keyword is not present, the offsets are added to the base
  // address with silently-wrapping two's complement arithmetic".
  // Therefore, the final code will be semantically equivalent.
  //
  // TODO(jingyue): do some range analysis to keep as many inbounds as
  // possible. GEPs with inbounds are more friendly to alias analysis.
  bool GEPWasInBounds = GEP->isInBounds();
  GEP->setIsInBounds(false);

  // Lowers a GEP to either GEPs with a single index or arithmetic operations.
  if (LowerGEP) {
    // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
    // arithmetic operations if the target uses alias analysis in codegen.
    if (TM && TM->getSubtargetImpl(*GEP->getParent()->getParent())->useAA())
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // No need to create another GEP if the accumulative byte offset is 0.
  if (AccumulativeByteOffset == 0)
    return true;

  // Offsets the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // type of %gep.
  //
  //   %gep2                       ; clone of %gep
  //   %0       = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>
  //   ... %new.gep ...
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  // Per the ANSI C standard, signed / unsigned = unsigned and signed %
  // unsigned = unsigned. Therefore, we cast ElementTypeSizeOfGEP to signed
  // because it is used with signed integers later.
  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL->getTypeAllocSize(GEP->getType()->getElementType()));
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
                                       ConstantInt::get(IntPtrTy, Index, true),
                                       GEP->getName(), GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
  } else {
    // Unlikely but possible. For example,
    // #pragma pack(1)
    // struct S {
    //   int a[3];
    //   int64 b[8];
    // };
    // #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        Type::getInt8Ty(GEP->getContext()), NewGEP,
        ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
        GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  if (DisableSeparateConstOffsetFromGEP)
    return false;

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  bool Changed = false;
  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE;)
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++))
        Changed |= splitGEP(GEP);
    // No need to split GEP ConstantExprs because all their indices are
    // constant already.
  }

  Changed |= reuniteExts(F);

  if (VerifyNoDeadCode)
    verifyNoDeadCode(F);

  return Changed;
}

Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
    const SCEV *Key, Instruction *Dominatee) {
  auto Pos = DominatingExprs.find(Key);
  if (Pos == DominatingExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    Instruction *Candidate = Candidates.back();
    if (DT->dominates(Candidate, Dominatee))
      return Candidate;
    Candidates.pop_back();
  }
  return nullptr;
}

bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  //   Dom: LHS+RHS
  //   I: sext(LHS)+sext(RHS)
  // If Dom can't sign overflow and Dom dominates I, optimize I to sext(Dom).
  // TODO: handle zext
  Value *LHS = nullptr, *RHS = nullptr;
  if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS)))) ||
      match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      if (auto *Dom = findClosestMatchingDominator(Key, I)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  }

  // Add I to DominatingExprs if it's an add/sub that can't sign overflow.
  if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS))) ||
      match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
    if (isKnownNotFullPoison(I)) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      DominatingExprs[Key].push_back(I);
    }
  }
  return false;
}

bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
  bool Changed = false;
  DominatingExprs.clear();
  for (auto Node = GraphTraits<DominatorTree *>::nodes_begin(DT);
       Node != GraphTraits<DominatorTree *>::nodes_end(DT); ++Node) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ) {
      Instruction *Cur = &*I++;
      Changed |= reuniteExts(Cur);
    }
  }
  return Changed;
}

void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
  for (auto &B : F) {
    for (auto &I : B) {
      if (isInstructionTriviallyDead(&I)) {
        std::string ErrMessage;
        raw_string_ostream RSO(ErrMessage);
        RSO << "Dead instruction detected!\n" << I << "\n";
        llvm_unreachable(RSO.str().c_str());
      }
    }
  }
}

bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
    GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
  if (!FirstGEP || !FirstGEP->hasOneUse())
    return false;

  if (!SecondGEP || FirstGEP->getParent() != SecondGEP->getParent())
    return false;

  if (FirstGEP == SecondGEP)
    return false;

  unsigned FirstNum = FirstGEP->getNumOperands();
  unsigned SecondNum = SecondGEP->getNumOperands();
  // Give up unless both GEPs have exactly two operands.
  if (FirstNum != SecondNum || FirstNum != 2)
    return false;

  Value *FirstBase = FirstGEP->getOperand(0);
  Value *SecondBase = SecondGEP->getOperand(0);
  Value *FirstOffset = FirstGEP->getOperand(1);
  // Give up if the index of the first GEP is loop invariant.
  if (CurLoop->isLoopInvariant(FirstOffset))
    return false;

  // Give up if the bases don't have the same type.
  if (FirstBase->getType() != SecondBase->getType())
    return false;

  Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);

  // Check if the second operand of the first GEP has a constant coefficient.
  // For example, in the following code, we won't gain anything by hoisting
  // the second GEP out because the second GEP can be folded away.
  //   %scevgep.sum.ur159 = add i64 %idxprom48.ur, 256
  //   %67 = shl i64 %scevgep.sum.ur159, 2
  //   %uglygep160 = getelementptr i8* %65, i64 %67
  //   %uglygep161 = getelementptr i8* %uglygep160, i64 -1024

  // Skip constant shift instruction which may be generated by splitting GEPs.
  if (FirstOffsetDef && FirstOffsetDef->isShift() &&
      isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
    FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));

  // Give up if FirstOffsetDef is an Add or Sub with a constant, because the
  // swap may not be profitable at all due to constant folding.
  if (FirstOffsetDef)
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
      unsigned opc = BO->getOpcode();
      if ((opc == Instruction::Add || opc == Instruction::Sub) &&
          (isa<ConstantInt>(BO->getOperand(0)) ||
           isa<ConstantInt>(BO->getOperand(1))))
        return false;
    }
  return true;
}

bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
  int UsesInLoop = 0;
  for (User *U : V->users()) {
    if (Instruction *User = dyn_cast<Instruction>(U))
      if (L->contains(User))
        if (++UsesInLoop > 1)
          return true;
  }
  return false;
}

void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
                                                GetElementPtrInst *Second) {
  Value *Offset1 = First->getOperand(1);
  Value *Offset2 = Second->getOperand(1);
  First->setOperand(1, Offset2);
  Second->setOperand(1, Offset1);

  // We changed p+o+c to p+c+o; p+c may no longer be inbounds.
  const DataLayout &DAL = First->getModule()->getDataLayout();
  APInt Offset(DAL.getPointerSizeInBits(
                   cast<PointerType>(First->getType())->getAddressSpace()),
               0);
  Value *NewBase =
      First->stripAndAccumulateInBoundsConstantOffsets(DAL, Offset);
  uint64_t ObjectSize;
  if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
      Offset.ugt(ObjectSize)) {
    First->setIsInBounds(false);
    Second->setIsInBounds(false);
  } else
    First->setIsInBounds(true);
}
