//===-- AMDGPUAlwaysInlinePass.cpp - Inline All Functions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
/// \file
/// This pass replaces function aliases with their aliasees, then marks
/// functions that must be inlined (users of LDS globals, and all defined
/// functions when function calls are disabled) with the always_inline
/// attribute. Under -amdgpu-stress-function-calls it marks the remaining
/// functions noinline instead.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPU.h"
16#include "AMDGPUTargetMachine.h"
17#include "Utils/AMDGPUBaseInfo.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/IR/Module.h"
20#include "llvm/Transforms/Utils/Cloning.h"
21
22using namespace llvm;
23
24namespace {
25
26static cl::opt<bool> StressCalls(
27  "amdgpu-stress-function-calls",
28  cl::Hidden,
29  cl::desc("Force all functions to be noinline"),
30  cl::init(false));
31
32class AMDGPUAlwaysInline : public ModulePass {
33  bool GlobalOpt;
34
35  void recursivelyVisitUsers(GlobalValue &GV,
36                             SmallPtrSetImpl<Function *> &FuncsToAlwaysInline);
37public:
38  static char ID;
39
40  AMDGPUAlwaysInline(bool GlobalOpt = false) :
41    ModulePass(ID), GlobalOpt(GlobalOpt) { }
42  bool runOnModule(Module &M) override;
43
44  void getAnalysisUsage(AnalysisUsage &AU) const override {
45    AU.setPreservesAll();
46 }
47};
48
49} // End anonymous namespace
50
// Register the pass with the legacy pass manager under the
// "amdgpu-always-inline" command-line name.
INITIALIZE_PASS(AMDGPUAlwaysInline, "amdgpu-always-inline",
                "AMDGPU Inline All Functions", false, false)

// Pass identification; the address of this variable serves as the unique ID.
char AMDGPUAlwaysInline::ID = 0;
55
56void AMDGPUAlwaysInline::recursivelyVisitUsers(
57  GlobalValue &GV,
58  SmallPtrSetImpl<Function *> &FuncsToAlwaysInline) {
59  SmallVector<User *, 16> Stack;
60
61  SmallPtrSet<const Value *, 8> Visited;
62
63  for (User *U : GV.users())
64    Stack.push_back(U);
65
66  while (!Stack.empty()) {
67    User *U = Stack.pop_back_val();
68    if (!Visited.insert(U).second)
69      continue;
70
71    if (Instruction *I = dyn_cast<Instruction>(U)) {
72      Function *F = I->getParent()->getParent();
73      if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
74        // FIXME: This is a horrible hack. We should always respect noinline,
75        // and just let us hit the error when we can't handle this.
76        //
77        // Unfortunately, clang adds noinline to all functions at -O0. We have
78        // to override this here. until that's fixed.
79        F->removeFnAttr(Attribute::NoInline);
80
81        FuncsToAlwaysInline.insert(F);
82        Stack.push_back(F);
83      }
84
85      // No need to look at further users, but we do need to inline any callers.
86      continue;
87    }
88
89    for (User *UU : U->users())
90      Stack.push_back(UU);
91  }
92}
93
94bool AMDGPUAlwaysInline::runOnModule(Module &M) {
95  std::vector<GlobalAlias*> AliasesToRemove;
96
97  SmallPtrSet<Function *, 8> FuncsToAlwaysInline;
98  SmallPtrSet<Function *, 8> FuncsToNoInline;
99
100  for (GlobalAlias &A : M.aliases()) {
101    if (Function* F = dyn_cast<Function>(A.getAliasee())) {
102      A.replaceAllUsesWith(F);
103      AliasesToRemove.push_back(&A);
104    }
105
106    // FIXME: If the aliasee isn't a function, it's some kind of constant expr
107    // cast that won't be inlined through.
108  }
109
110  if (GlobalOpt) {
111    for (GlobalAlias* A : AliasesToRemove) {
112      A->eraseFromParent();
113    }
114  }
115
116  // Always force inlining of any function that uses an LDS global address. This
117  // is something of a workaround because we don't have a way of supporting LDS
118  // objects defined in functions. LDS is always allocated by a kernel, and it
119  // is difficult to manage LDS usage if a function may be used by multiple
120  // kernels.
121  //
122  // OpenCL doesn't allow declaring LDS in non-kernels, so in practice this
123  // should only appear when IPO passes manages to move LDs defined in a kernel
124  // into a single user function.
125
126  for (GlobalVariable &GV : M.globals()) {
127    // TODO: Region address
128    unsigned AS = GV.getAddressSpace();
129    if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::REGION_ADDRESS)
130      continue;
131
132    recursivelyVisitUsers(GV, FuncsToAlwaysInline);
133  }
134
135  if (!AMDGPUTargetMachine::EnableFunctionCalls || StressCalls) {
136    auto IncompatAttr
137      = StressCalls ? Attribute::AlwaysInline : Attribute::NoInline;
138
139    for (Function &F : M) {
140      if (!F.isDeclaration() && !F.use_empty() &&
141          !F.hasFnAttribute(IncompatAttr)) {
142        if (StressCalls) {
143          if (!FuncsToAlwaysInline.count(&F))
144            FuncsToNoInline.insert(&F);
145        } else
146          FuncsToAlwaysInline.insert(&F);
147      }
148    }
149  }
150
151  for (Function *F : FuncsToAlwaysInline)
152    F->addFnAttr(Attribute::AlwaysInline);
153
154  for (Function *F : FuncsToNoInline)
155    F->addFnAttr(Attribute::NoInline);
156
157  return !FuncsToAlwaysInline.empty() || !FuncsToNoInline.empty();
158}
159
// Factory for the legacy-PM pass instance. \p GlobalOpt additionally erases
// function aliases after their uses have been redirected to the aliasees.
ModulePass *llvm::createAMDGPUAlwaysInlinePass(bool GlobalOpt) {
  return new AMDGPUAlwaysInline(GlobalOpt);
}
163
164