//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass performs exec mask handling peephole optimizations which need
/// to be done before register allocation to reduce register pressure.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;

public:
  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

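// Returns true if \p MI is a full copy of exec: a COPY whose source operand
// is the wave-size exec register (EXEC_LO in wave32, EXEC in wave64).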
static bool isFullExecCopy(const MachineInstr &MI, const GCNSubtarget &ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  return MI.isFullCopy() && MI.getOperand(1).getReg() == Exec;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
// rebuildSetCC(). We start from the S_CBRANCH to avoid an exhaustive search,
// but only the first three instructions are actually needed. The S_AND with
// exec is a required part of the pattern since V_CNDMASK_B32 writes zeroes
// for inactive lanes.
//
// Returns the %cc register on success, AMDGPU::NoRegister otherwise.
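//
// The same rewrite applies in wave32 mode with the 32-bit opcodes and
// registers; e.g. (a sketch, register classes elided):
//    $vcc_lo = S_AND_B32 $exec_lo, %cmp
// becomes
//    $vcc_lo = S_ANDN2_B32 $exec_lo, %cc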
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  bool Wave32 = ST.isWave32();
  const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
  const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

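  // Find the conditional branch on VCC among the block terminators; the rest
  // of the pattern is matched backwards from it through reaching definitions.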
  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
    unsigned Opc = MI.getOpcode();
    return Opc == AMDGPU::S_CBRANCH_VCCZ || Opc == AMDGPU::S_CBRANCH_VCCNZ;
  });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;

  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;

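  // S_AND is commutative, so exec may appear as either source operand; take
  // the other operand as the compare result.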
  MachineOperand *AndCC = &And->getOperand(1);
  Register CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

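  // The compare must be a V_CMP_NE_U32 and must sit in the same block as the
  // S_AND, so that the instruction scan between the two below is well-defined.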
  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;

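  // The compare must be against the immediate 1; accept the operands in
  // either order and canonicalize to (register, immediate).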
  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  Register SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;

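  // Source modifiers (abs/neg) on the select would change the 0/1 values the
  // pattern relies on, so bail out if any are set.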
  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return AMDGPU::NoRegister;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
                    << *And);

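  // Rewrite the S_AND into S_ANDN2 of exec and %cc. For lanes active in exec,
  // %sel equals %cc, so (%sel != 1) is just the negation of %cc; inactive
  // lanes are cleared by the exec operand in both forms.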
  Register CCReg = CC->getReg();
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 =
      BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc),
              And->getOperand(0).getReg())
          .addReg(ExecReg)
          .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
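  // Both opcodes implicitly define SCC as their last operand; transfer the
  // dead flag from the old S_AND so the liveness information stays accurate.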
  MachineOperand &AndSCC = And->getOperand(3);
  assert(AndSCC.getReg() == AMDGPU::SCC);
  MachineOperand &Andn2SCC = Andn2->getOperand(3);
  assert(Andn2SCC.getReg() == AMDGPU::SCC);
  Andn2SCC.setIsDead(AndSCC.isDead());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. Its value must not be used between the compare
  // and the S_AND if it is VCC, and must be entirely unused if it is any
  // other register.
  if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI);
                    }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();

  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, *MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses, otherwise we would
      // need to be careful to update or remove them. S_ENDPGM always has a
      // single imm operand that is not used other than to end up in the
      // encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

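      // Walk blocks bottom-up starting from the S_ENDPGM block, deleting
      // trailing instructions that have no observable effect. When a block
      // is emptied this way, ascend into its qualifying predecessors.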
      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend into predecessors for which this block is the sole
        // successor.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // If the only user of a logical operation is a move to exec, fold the
    // exec copy into it now to prevent forming a saveexec. I.e.:
    //
    //    %0:sreg_64 = COPY $exec
    //    %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
    // =>
    //    %1 = S_AND_B64 $exec, %2:sreg_64
    unsigned ScanThreshold = 10;
    for (auto I = MBB.rbegin(), E = MBB.rend(); I != E && ScanThreshold--;
         ++I) {
      if (!isFullExecCopy(*I, ST))
        continue;

      Register SavedExec = I->getOperand(0).getReg();
      if (SavedExec.isVirtual() && MRI->hasOneNonDBGUse(SavedExec) &&
          MRI->use_instr_nodbg_begin(SavedExec)->getParent() ==
              I->getParent()) {
        LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
        LIS->RemoveMachineInstrFromMaps(*I);
        I->eraseFromParent();
        MRI->replaceRegWith(SavedExec, Exec);
        LIS->removeInterval(SavedExec);
        Changed = true;
      }
      break;
    }
  }

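  // Any register touched by the rewrites above may have changed liveness:
  // drop the affected intervals, recompute virtual register intervals on the
  // spot, and invalidate physical register units for lazy recomputation.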
  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (Register::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI->reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}
