//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
//                  Set Load/Store Alignments From Assumptions
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
//
//===----------------------------------------------------------------------===//
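// For example, clang lowers __builtin_assume_aligned(a, 32) into an assume
// of roughly this shape (a sketch; the exact IR depends on the frontend):
//
//   %ptrint = ptrtoint i8* %a to i64
//   %maskedptr = and i64 %ptrint, 31
//   %maskcond = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)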

#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

STATISTIC(NumLoadAlignChanged,
  "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
  "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
  "Number of memory intrinsics changed by alignment assumptions");

namespace {
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();

    AU.setPreservesCFG();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
  }

  AlignmentFromAssumptionsPass Impl;
};
} // end anonymous namespace

char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}

// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be
// reduced to a constant. Using SCEV to compute alignment handles the case
// where DiffSCEV is a recurrence with a constant start such that the aligned
// offset is constant, e.g., {16,+,32} % 32 -> 16.
static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
                                    const SCEV *AlignSCEV,
                                    ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);

  LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
                    << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
      dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return (unsigned)
        cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
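    // (For example, a remainder of 8 relative to a 32-byte-aligned address
    // still guarantees 8-byte alignment. A remainder of 12 would guarantee
    // 4-byte alignment, but this conservative power-of-2 check declines to
    // report it.)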
    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return (unsigned) DiffUnitsAbs;
  }

  return 0;
}

// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr.
static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                                const SCEV *OffSCEV, Value *Ptr,
                                ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // sign-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV);

  LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
                    << *AlignSCEV << " and offset " << *OffSCEV
                    << " using diff " << *DiffSCEV << "\n");

  unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE);
  LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n");

  if (NewAlignment) {
    return NewAlignment;
  } else if (const SCEVAddRecExpr *DiffARSCEV =
             dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a
    // constant, but we should try harder: if we assume that a is 32-byte
    // aligned, then in
    //   for (i = 0; i < 1024; i += 4) r += a[i];
    // not all of the loads from a are 32-byte aligned; instead they
    // alternate between 32-byte and 16-byte alignment. As a result, the new
    // alignment will not be a constant, but can still be improved over the
    // default (of 4) to 16.
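    // (A worked sketch of that example, assuming 4-byte elements: the load
    // addresses form the recurrence {0,+,16} relative to the
    // 32-byte-aligned base, so the start offset yields alignment 32, the
    // increment yields alignment 16, and because 16 divides 32 the loads
    // can safely be marked 16-byte aligned.)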

    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
                      << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in the
    // first iteration, and also the alignment using the per-iteration delta.
    // If these are the same, then use that answer. Otherwise, use the smaller
    // one, but only if it divides the larger one.
    NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n");
    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n");

    if (!NewAlignment || !NewIncAlignment) {
      return 0;
    } else if (NewAlignment > NewIncAlignment) {
      if (NewAlignment % NewIncAlignment == 0) {
        LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment
                          << "\n");
        return NewIncAlignment;
      }
    } else if (NewIncAlignment > NewAlignment) {
      if (NewIncAlignment % NewAlignment == 0) {
        LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
                          << "\n");
        return NewAlignment;
      }
    } else if (NewIncAlignment == NewAlignment) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
                        << "\n");
      return NewAlignment;
    }
  }

  return 0;
}

bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
                                                        Value *&AAPtr,
                                                        const SCEV *&AlignSCEV,
                                                        const SCEV *&OffSCEV) {
  // An alignment assume must be a statement about the least-significant
  // bits of the pointer being zero, possibly with some offset.
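  // For example, a 32-byte alignment assumption typically arrives here as
  //   icmp eq (and (ptrtoint %ptr to i64), 31), 0
  // (a sketch; operand order may vary), where the mask 31 has five trailing
  // ones and therefore encodes an alignment of 1 << 5 = 32.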
  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
  if (!ICI)
    return false;

  // This must be an expression of the form: x & m == 0.
  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
    return false;

  // Swap things around so that the RHS is 0.
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
  if (CmpLHSSCEV->isZero())
    std::swap(CmpLHS, CmpRHS);
  else if (!CmpRHSSCEV->isZero())
    return false;

  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
    return false;

  // Swap things around so that the right operand of the and is a constant
  // (the mask); we cannot deal with variable masks.
  Value *AndLHS = CmpBO->getOperand(0);
  Value *AndRHS = CmpBO->getOperand(1);
  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
  if (isa<SCEVConstant>(AndLHSSCEV)) {
    std::swap(AndLHS, AndRHS);
    std::swap(AndLHSSCEV, AndRHSSCEV);
  }

  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
  if (!MaskSCEV)
    return false;

  // The mask must have some trailing ones (otherwise the condition is
  // trivial and tells us nothing about the alignment of the left operand).
  unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
  if (!TrailingOnes)
    return false;

  // Cap the alignment at the maximum with which LLVM can deal (and make sure
  // we don't overflow the shift).
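  // (For example, an all-ones i64 mask has 64 trailing ones; without the
  // clamp below, the 1u << TrailingOnes shift would overflow unsigned.)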
  uint64_t Alignment;
  TrailingOnes = std::min(TrailingOnes,
    unsigned(sizeof(unsigned) * CHAR_BIT - 1));
  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);

  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
  AlignSCEV = SE->getConstant(Int64Ty, Alignment);

  // The LHS might be a ptrtoint instruction, or it might be the pointer
  // with an offset.
  AAPtr = nullptr;
  OffSCEV = nullptr;
  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
    AAPtr = PToI->getPointerOperand();
    OffSCEV = SE->getZero(Int64Ty);
  } else if (const SCEVAddExpr *AndLHSAddSCEV =
             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
    // Try to find the ptrtoint; subtract it and the rest is the offset.
    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
          AAPtr = PToI->getPointerOperand();
          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
          break;
        }
  }

  if (!AAPtr)
    return false;

  // Sign extend the offset to 64 bits (so that it is like all of the other
  // expressions).
  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
  if (OffSCEVBits < 64)
    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
  else if (OffSCEVBits > 64)
    return false;

  AAPtr = AAPtr->stripPointerCasts();
  return true;
}

bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))
    return false;

  // Skip ConstantPointerNull and UndefValue.  Assumptions on these shouldn't
  // affect other users.
  if (isa<ConstantData>(AAPtr))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction *, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      if (isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();

    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        LI->getPointerOperand(), SE);

      if (NewAlignment > LI->getAlignment()) {
        LI->setAlignment(MaybeAlign(NewAlignment));
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        SI->getPointerOperand(), SE);

      if (NewAlignment > SI->getAlignment()) {
        SI->setAlignment(MaybeAlign(NewAlignment));
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        MI->getDest(), SE);

      LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n");
      if (NewDestAlignment > MI->getDestAlignment()) {
        MI->setDestAlignment(NewDestAlignment);
        ++NumMemIntAlignChanged;
      }

      // For memory transfers, there is also a source alignment that
      // can be set.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
          MTI->getSource(), SE);

        LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n");

        if (NewSrcAlignment > MTI->getSourceAlignment()) {
          MTI->setSourceAlignment(NewSrcAlignment);
          ++NumMemIntAlignChanged;
        }
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
    }
  }

  return true;
}

bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return Impl.runImpl(F, AC, SE, DT);
}

bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,
                                           ScalarEvolution *SE_,
                                           DominatorTree *DT_) {
  SE = SE_;
  DT = DT_;

  bool Changed = false;
  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH)
      Changed |= processAssumption(cast<CallInst>(AssumeVH));

  return Changed;
}

PreservedAnalyses
AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) {
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, &SE, &DT))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}