//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds amdgpu.uniform metadata to IR values so this information
/// can be used during instruction selection.
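///
/// For example, a load whose pointer divergence analysis proves uniform may
/// end up tagged roughly as follows (illustrative IR only, not taken from a
/// real test):
///   %v = load i32, i32 addrspace(1)* %p, !amdgpu.uniform !0, !amdgpu.noclobber !0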
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-annotate-uniform"

using namespace llvm;

namespace {

class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  LegacyDivergenceAnalysis *DA;
  MemoryDependenceResults *MDR;
  LoopInfo *LI;
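  // Zero-index GEP clones created at the function entry so that pointers which
  // are not instructions (kernel arguments, globals) have an instruction to
  // carry the metadata; keyed by the original pointer value.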
  DenseMap<Value*, GetElementPtrInst*> noClobberClones;
  bool isEntryFunc;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  bool isClobberedInFunction(LoadInst *Load);
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;

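// Attach empty marker metadata nodes; consumers only check for the presence of
// the metadata kind, not its contents.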
static void setUniformMetadata(Instruction *I) {
  I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
}
static void setNoClobberMetadata(Instruction *I) {
  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
}

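// Collect into Set every block that can reach Root, by recursively walking
// predecessor edges.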
static void DFS(BasicBlock *Root, SetVector<BasicBlock *> &Set) {
  for (auto *I : predecessors(Root))
    if (Set.insert(I))
      DFS(I, Set);
}

bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst *Load) {
  // 1. Get the Loop for Load->getParent().
  // 2. If it exists, collect all blocks of the outermost enclosing loop and
  //    check them for writes, starting the DFS from that loop's header.
  // 3. If not, start the DFS over all predecessors of the load's block.
  SetVector<BasicBlock *> Checklist;
  BasicBlock *Start = Load->getParent();
  Checklist.insert(Start);
  const Value *Ptr = Load->getPointerOperand();
  const Loop *L = LI->getLoopFor(Start);
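  // If the load sits inside a loop, conservatively check the whole outermost
  // enclosing loop and everything that can reach its header.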
  if (L) {
    const Loop *P = L;
    do {
      L = P;
      P = P->getParentLoop();
    } while (P);
    Checklist.insert(L->block_begin(), L->block_end());
    Start = L->getHeader();
  }

  DFS(Start, Checklist);
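  // A clobbering or unknown dependency on Ptr in any block that can reach the
  // load defeats the amdgpu.noclobber annotation. In the load's own block
  // (when no loop widening happened) only the instructions above the load are
  // scanned; in all other blocks the whole block is scanned.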
  for (auto &BB : Checklist) {
    BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
      BasicBlock::iterator(Load) : BB->end();
    auto Q = MDR->getPointerDependencyFrom(MemoryLocation(Ptr), true,
                                           StartIt, BB, Load);
    if (Q.isClobber() || Q.isUnknown())
      return true;
  }
  return false;
}

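// If divergence analysis proves the branch uniform, tag the block's terminator
// so instruction selection can treat its control flow as uniform.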
void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (DA->isUniform(&I))
    setUniformMetadata(I.getParent()->getTerminator());
}

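// Annotate loads from uniform pointers. In entry functions, additionally try
// to prove that no write to the loaded memory can reach the load, in which
// case the pointer is also marked amdgpu.noclobber.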
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [&](LoadInst &Load) -> bool {
    return Load.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  };
  // We are tracking only up to the function boundary and cannot go beyond it
  // because of FunctionPass restrictions, so we can only prove that memory is
  // not clobbered for memory operations that are live-in to entry points.
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);

  if (!isEntryFunc) {
    if (PtrI)
      setUniformMetadata(PtrI);
    return;
  }

  bool NotClobbered = false;
  if (PtrI)
    NotClobbered = !isClobberedInFunction(&I);
  else if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
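    // Arguments and globals are not instructions, so there is nothing to hang
    // the metadata on directly; create (or reuse) a trivial zero-index GEP
    // clone at the function entry and annotate that instead.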
    if (isGlobalLoad(I) && !isClobberedInFunction(&I)) {
      NotClobbered = true;
      // Look up an existing GEP clone for this pointer.
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // Create a GEP clone of the Value.
        Function *F = I.getParent()->getParent();
        Value *Idx = Constant::getIntegerValue(
          Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
        // Insert the GEP at the entry block so it dominates all uses.
        PtrI = GetElementPtrInst::Create(
          Ptr->getType()->getPointerElementType(), Ptr,
          ArrayRef<Value*>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
        // Cache the clone so later loads of the same pointer reuse it instead
        // of creating a duplicate GEP.
        noClobberClones[Ptr] = cast<GetElementPtrInst>(PtrI);
      }
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}

bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}

bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DA  = &getAnalysis<LegacyDivergenceAnalysis>();
  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  LI  = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  isEntryFunc = AMDGPU::isEntryFunctionCC(F.getCallingConv());

  visit(F);
  noClobberClones.clear();
  return true;
}

FunctionPass *
llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}