//===- GCNDPPCombine.cpp - optimization for DPP instructions -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass combines a V_MOV_B32_dpp instruction with its VALU uses as a DPP
// src0 operand. If any of the use instructions cannot be combined with the
// mov, the whole sequence is reverted.
//
// $old = ...
// $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
//                            dpp_controls..., $row_mask, $bank_mask, $bound_ctrl
// $res = VALU $dpp_value [, src1]
//
// to
//
// $res = VALU_DPP $combined_old, $vgpr_to_be_read_from_other_lane, [src1,]
//                 dpp_controls..., $row_mask, $bank_mask, $combined_bound_ctrl
//
// Combining rules:
//
// if $row_mask and $bank_mask are fully enabled (0xF) and
//    $bound_ctrl==DPP_BOUND_ZERO or $old==0
// -> $combined_old = undef,
//    $combined_bound_ctrl = DPP_BOUND_ZERO
//
// if the VALU op is binary and
//    $bound_ctrl==DPP_BOUND_OFF and
//    $old==identity value (immediate) for the VALU op
// -> $combined_old = src1,
//    $combined_bound_ctrl = DPP_BOUND_OFF
//
// Otherwise cancel.
//
// The mov_dpp instruction should reside in the same BB as all its uses.
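//
// As an illustration of the second rule (schematic, not verbatim MIR; the
// 0xE/0xF masks and the V_ADD_U32 opcode are example choices), a zero $old is
// the identity value for an add:
//
//   $old = V_MOV_B32_e32 0
//   $dpp_value = V_MOV_B32_dpp $old, $vgpr_to_be_read_from_other_lane,
//                              dpp_controls..., 0xE, 0xF, DPP_BOUND_OFF
//   $res = V_ADD_U32_e32 $dpp_value, $src1
//
// becomes
//
//   $res = V_ADD_U32_dpp $src1, $vgpr_to_be_read_from_other_lane, $src1,
//                        dpp_controls..., 0xE, 0xF, DPP_BOUND_OFF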
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "gcn-dpp-combine"

STATISTIC(NumDPPMovsCombined, "Number of DPP moves combined.");

namespace {

class GCNDPPCombine : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const GCNSubtarget *ST;

  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;

  MachineOperand *getOldOpndValue(MachineOperand &OldOpnd) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR,
                              MachineOperand *OldOpnd, bool CombBCZ,
                              bool IsShrinkable) const;

  MachineInstr *createDPPInst(MachineInstr &OrigMI, MachineInstr &MovMI,
                              RegSubRegPair CombOldVGPR, bool CombBCZ,
                              bool IsShrinkable) const;

  bool hasNoImmOrEqual(MachineInstr &MI,
                       unsigned OpndName,
                       int64_t Value,
                       int64_t Mask = -1) const;

  bool combineDPPMov(MachineInstr &MI) const;

public:
  static char ID;

  GCNDPPCombine() : MachineFunctionPass(ID) {
    initializeGCNDPPCombinePass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "GCN DPP Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
      .set(MachineFunctionProperties::Property::IsSSA);
  }

private:
  int getDPPOp(unsigned Op, bool IsShrinkable) const;
  bool isShrinkable(MachineInstr &MI) const;
};

} // end anonymous namespace

INITIALIZE_PASS(GCNDPPCombine, DEBUG_TYPE, "GCN DPP Combine", false, false)

char GCNDPPCombine::ID = 0;

char &llvm::GCNDPPCombineID = GCNDPPCombine::ID;

FunctionPass *llvm::createGCNDPPCombinePass() {
  return new GCNDPPCombine();
}

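// Returns true if MI is a VOP3 instruction that can be shrunk to its 32-bit
// (e32) encoding for the purpose of forming a 32-bit DPP op: it has an e32
// equivalent, is not a True16 instruction, has no used sdst result, and
// carries no modifiers beyond abs/neg.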
bool GCNDPPCombine::isShrinkable(MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  if (!TII->isVOP3(Op)) {
    return false;
  }
  if (!TII->hasVALU32BitEncoding(Op)) {
    LLVM_DEBUG(dbgs() << "  Inst has no e32 equivalent\n");
    return false;
  }
  // Do not shrink True16 instructions pre-RA: the shrunk form would restrict
  // register allocation to only the first 128 VGPRs.
  if (AMDGPU::isTrue16Inst(Op))
    return false;
  if (const auto *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) {
    // Give up if there are any uses of the sdst in carry-out or VOPC.
    // The shrunken form of the instruction would write it to vcc instead of to
    // a virtual register. If we rewrote the uses the shrinking would be
    // possible.
    if (!MRI->use_nodbg_empty(SDst->getReg()))
      return false;
  }
  // Check whether any modifiers other than abs/neg are set (opsel, for
  // example).
  const int64_t Mask = ~(SISrcMods::ABS | SISrcMods::NEG);
  if (!hasNoImmOrEqual(MI, AMDGPU::OpName::src0_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::src1_modifiers, 0, Mask) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::clamp, 0) ||
      !hasNoImmOrEqual(MI, AMDGPU::OpName::omod, 0)) {
    LLVM_DEBUG(dbgs() << "  Inst has non-default modifiers\n");
    return false;
  }
  return true;
}

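// Returns the DPP pseudo opcode to use for Op: the 32-bit DPP form of Op (or
// of its e32 equivalent when IsShrinkable), falling back to the 64-bit VOP3
// DPP form on subtargets that support it, or -1 if no encodable DPP opcode
// exists.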
int GCNDPPCombine::getDPPOp(unsigned Op, bool IsShrinkable) const {
  int DPP32 = AMDGPU::getDPPOp32(Op);
  if (IsShrinkable) {
    assert(DPP32 == -1);
    int E32 = AMDGPU::getVOPe32(Op);
    DPP32 = (E32 == -1) ? -1 : AMDGPU::getDPPOp32(E32);
  }
  if (DPP32 != -1 && TII->pseudoToMCOpcode(DPP32) != -1)
    return DPP32;
  int DPP64 = -1;
  if (ST->hasVOP3DPP())
    DPP64 = AMDGPU::getDPPOp64(Op);
  if (DPP64 != -1 && TII->pseudoToMCOpcode(DPP64) != -1)
    return DPP64;
  return -1;
}

// tracks the register operand definition and returns:
//   1. immediate operand used to initialize the register if found
//   2. nullptr if the register operand is undef
//   3. the operand itself otherwise
MachineOperand *GCNDPPCombine::getOldOpndValue(MachineOperand &OldOpnd) const {
  auto *Def = getVRegSubRegDef(getRegSubRegPair(OldOpnd), *MRI);
  if (!Def)
    return nullptr;

  switch(Def->getOpcode()) {
  default: break;
  case AMDGPU::IMPLICIT_DEF:
    return nullptr;
  case AMDGPU::COPY:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_MOV_B64_e32:
  case AMDGPU::V_MOV_B64_e64: {
    auto &Op1 = Def->getOperand(1);
    if (Op1.isImm())
      return &Op1;
    break;
  }
  }
  return &OldOpnd;
}

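// Returns the register size in bits expected for operand Idx of MI, or 0 if
// the operand has no register class constraint.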
[[maybe_unused]] static unsigned getOperandSize(MachineInstr &MI, unsigned Idx,
                                                MachineRegisterInfo &MRI) {
  int16_t RegClass = MI.getDesc().operands()[Idx].RegClass;
  if (RegClass == -1)
    return 0;

  const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
  return TRI->getRegSizeInBits(*TRI->getRegClass(RegClass));
}

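// Builds the DPP variant of OrigMI next to it: operands are taken from OrigMI,
// while src0 and the DPP controls and masks come from MovMI; CombOldVGPR and
// CombBCZ provide the old operand and bound_ctrl. Returns nullptr (after
// erasing the half-built instruction) if some operand cannot be placed
// legally.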
MachineInstr *GCNDPPCombine::createDPPInst(MachineInstr &OrigMI,
                                           MachineInstr &MovMI,
                                           RegSubRegPair CombOldVGPR,
                                           bool CombBCZ,
                                           bool IsShrinkable) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  bool HasVOP3DPP = ST->hasVOP3DPP();
  auto OrigOp = OrigMI.getOpcode();
  auto DPPOp = getDPPOp(OrigOp, IsShrinkable);
  if (DPPOp == -1) {
    LLVM_DEBUG(dbgs() << "  failed: no DPP opcode\n");
    return nullptr;
  }
  int OrigOpE32 = AMDGPU::getVOPe32(OrigOp);
  // The mask checks done by the caller happen to cover the VOPC case, though
  // not deliberately, so re-verify the requirement here.
  auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
  assert(RowMaskOpnd && RowMaskOpnd->isImm());
  auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
  assert(BankMaskOpnd && BankMaskOpnd->isImm());
  const bool MaskAllLanes =
      RowMaskOpnd->getImm() == 0xF && BankMaskOpnd->getImm() == 0xF;
  (void)MaskAllLanes;
  assert((MaskAllLanes ||
          !(TII->isVOPC(DPPOp) || (TII->isVOP3(DPPOp) && OrigOpE32 != -1 &&
                                   TII->isVOPC(OrigOpE32)))) &&
         "VOPC cannot form DPP unless mask is full");

  auto DPPInst = BuildMI(*OrigMI.getParent(), OrigMI,
                         OrigMI.getDebugLoc(), TII->get(DPPOp))
    .setMIFlags(OrigMI.getFlags());

  bool Fail = false;
  do {
    int NumOperands = 0;
    if (auto *Dst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst)) {
      DPPInst.add(*Dst);
      ++NumOperands;
    }
    if (auto *SDst = TII->getNamedOperand(OrigMI, AMDGPU::OpName::sdst)) {
      if (TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, SDst)) {
        DPPInst.add(*SDst);
        ++NumOperands;
      }
      // If we shrank a 64-bit VOP3b to 32 bits, just ignore the sdst.
    }

    const int OldIdx = AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::old);
    if (OldIdx != -1) {
      assert(OldIdx == NumOperands);
      assert(isOfRegClass(
          CombOldVGPR,
          *MRI->getRegClass(
              TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst)->getReg()),
          *MRI));
      auto *Def = getVRegSubRegDef(CombOldVGPR, *MRI);
      DPPInst.addReg(CombOldVGPR.Reg, Def ? 0 : RegState::Undef,
                     CombOldVGPR.SubReg);
      ++NumOperands;
    } else if (TII->isVOPC(DPPOp) || (TII->isVOP3(DPPOp) && OrigOpE32 != -1 &&
                                      TII->isVOPC(OrigOpE32))) {
      // VOPC DPP and VOPC promoted to VOP3 DPP do not have an old operand
      // because they write to SGPRs not VGPRs.
    } else {
      // TODO: this discards MAC/FMA instructions for now; add support later.
      LLVM_DEBUG(dbgs() << "  failed: no old operand in DPP instruction,"
                           " TBD\n");
      Fail = true;
      break;
    }

    auto *Mod0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0_modifiers);
    if (Mod0) {
      assert(NumOperands == AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src0_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod0->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod0->getImm());
      ++NumOperands;
    } else if (AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::src0_modifiers)) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
    assert(Src0);
    int Src0Idx = NumOperands;
    if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
      LLVM_DEBUG(dbgs() << "  failed: src0 is illegal\n");
      Fail = true;
      break;
    }
    DPPInst.add(*Src0);
    DPPInst->getOperand(NumOperands).setIsKill(false);
    ++NumOperands;

    auto *Mod1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1_modifiers);
    if (Mod1) {
      assert(NumOperands == AMDGPU::getNamedOperandIdx(DPPOp,
                                          AMDGPU::OpName::src1_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod1->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod1->getImm());
      ++NumOperands;
    } else if (AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::src1_modifiers)) {
      DPPInst.addImm(0);
      ++NumOperands;
    }
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Src1) {
      int OpNum = NumOperands;
      // If subtarget does not support SGPRs for src1 operand then the
      // requirements are the same as for src0. We check src0 instead because
      // pseudos are shared between subtargets and allow SGPR for src1 on all.
      if (!ST->hasDPPSrc1SGPR()) {
        assert(getOperandSize(*DPPInst, Src0Idx, *MRI) ==
                   getOperandSize(*DPPInst, NumOperands, *MRI) &&
               "Src0 and Src1 operands should have the same size");
        OpNum = Src0Idx;
      }
      if (!TII->isOperandLegal(*DPPInst.getInstr(), OpNum, Src1)) {
        LLVM_DEBUG(dbgs() << "  failed: src1 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src1);
      ++NumOperands;
    }

    auto *Mod2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2_modifiers);
    if (Mod2) {
      assert(NumOperands ==
             AMDGPU::getNamedOperandIdx(DPPOp, AMDGPU::OpName::src2_modifiers));
      assert(HasVOP3DPP ||
             (0LL == (Mod2->getImm() & ~(SISrcMods::ABS | SISrcMods::NEG))));
      DPPInst.addImm(Mod2->getImm());
      ++NumOperands;
    }
    auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2);
    if (Src2) {
      if (!TII->getNamedOperand(*DPPInst.getInstr(), AMDGPU::OpName::src2) ||
          !TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src2)) {
        LLVM_DEBUG(dbgs() << "  failed: src2 is illegal\n");
        Fail = true;
        break;
      }
      DPPInst.add(*Src2);
      ++NumOperands;
    }

    if (HasVOP3DPP) {
      auto *ClampOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::clamp);
      if (ClampOpr && AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::clamp)) {
        DPPInst.addImm(ClampOpr->getImm());
      }
      auto *VdstInOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::vdst_in);
      if (VdstInOpr &&
          AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::vdst_in)) {
        DPPInst.add(*VdstInOpr);
      }
      auto *OmodOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::omod);
      if (OmodOpr && AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::omod)) {
        DPPInst.addImm(OmodOpr->getImm());
      }
      // Validate that OP_SEL is all zeros and OP_SEL_HI is all ones.
      if (auto *OpSelOpr =
              TII->getNamedOperand(OrigMI, AMDGPU::OpName::op_sel)) {
        int64_t OpSel = 0;
        OpSel |= (Mod0 ? (!!(Mod0->getImm() & SISrcMods::OP_SEL_0) << 0) : 0);
        OpSel |= (Mod1 ? (!!(Mod1->getImm() & SISrcMods::OP_SEL_0) << 1) : 0);
        OpSel |= (Mod2 ? (!!(Mod2->getImm() & SISrcMods::OP_SEL_0) << 2) : 0);
        if (Mod0 && TII->isVOP3(OrigMI) && !TII->isVOP3P(OrigMI))
          OpSel |= !!(Mod0->getImm() & SISrcMods::DST_OP_SEL) << 3;

        if (OpSel != 0) {
          LLVM_DEBUG(dbgs() << "  failed: op_sel must be zero\n");
          Fail = true;
          break;
        }
        if (AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::op_sel))
          DPPInst.addImm(OpSel);
      }
      if (auto *OpSelHiOpr =
              TII->getNamedOperand(OrigMI, AMDGPU::OpName::op_sel_hi)) {
        int64_t OpSelHi = 0;
        OpSelHi |= (Mod0 ? (!!(Mod0->getImm() & SISrcMods::OP_SEL_1) << 0) : 0);
        OpSelHi |= (Mod1 ? (!!(Mod1->getImm() & SISrcMods::OP_SEL_1) << 1) : 0);
        OpSelHi |= (Mod2 ? (!!(Mod2->getImm() & SISrcMods::OP_SEL_1) << 2) : 0);

        // Only VOP3P has op_sel_hi, and all VOP3P instructions have 3
        // operands, so check that all 3 op_sel_hi bits are set.
        assert(Src2 && "Expected vop3p with 3 operands");
        if (OpSelHi != 7) {
          LLVM_DEBUG(dbgs() << "  failed: op_sel_hi must be all set to one\n");
          Fail = true;
          break;
        }
        if (AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::op_sel_hi))
          DPPInst.addImm(OpSelHi);
      }
      auto *NegOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::neg_lo);
      if (NegOpr && AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::neg_lo)) {
        DPPInst.addImm(NegOpr->getImm());
      }
      auto *NegHiOpr = TII->getNamedOperand(OrigMI, AMDGPU::OpName::neg_hi);
      if (NegHiOpr && AMDGPU::hasNamedOperand(DPPOp, AMDGPU::OpName::neg_hi)) {
        DPPInst.addImm(NegHiOpr->getImm());
      }
    }
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask));
    DPPInst.add(*TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask));
    DPPInst.addImm(CombBCZ ? 1 : 0);
  } while (false);

  if (Fail) {
    DPPInst.getInstr()->eraseFromParent();
    return nullptr;
  }
  LLVM_DEBUG(dbgs() << "  combined:  " << *DPPInst.getInstr());
  return DPPInst.getInstr();
}

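// Returns true if the immediate in OldOpnd is the identity value for the
// binary operation OrigMIOp, e.g. 0 for add/or/xor, all ones for and/umin,
// and 1 for the 24-bit multiplies.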
static bool isIdentityValue(unsigned OrigMIOp, MachineOperand *OldOpnd) {
  assert(OldOpnd->isImm());
  switch (OrigMIOp) {
  default: break;
  case AMDGPU::V_ADD_U32_e32:
  case AMDGPU::V_ADD_U32_e64:
  case AMDGPU::V_ADD_CO_U32_e32:
  case AMDGPU::V_ADD_CO_U32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e64:
  case AMDGPU::V_MAX_U32_e32:
  case AMDGPU::V_MAX_U32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::V_XOR_B32_e64:
    if (OldOpnd->getImm() == 0)
      return true;
    break;
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_MIN_U32_e32:
  case AMDGPU::V_MIN_U32_e64:
    if (static_cast<uint32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<uint32_t>::max())
      return true;
    break;
  case AMDGPU::V_MIN_I32_e32:
  case AMDGPU::V_MIN_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::max())
      return true;
    break;
  case AMDGPU::V_MAX_I32_e32:
  case AMDGPU::V_MAX_I32_e64:
    if (static_cast<int32_t>(OldOpnd->getImm()) ==
        std::numeric_limits<int32_t>::min())
      return true;
    break;
  case AMDGPU::V_MUL_I32_I24_e32:
  case AMDGPU::V_MUL_I32_I24_e64:
  case AMDGPU::V_MUL_U32_U24_e32:
  case AMDGPU::V_MUL_U32_U24_e64:
    if (OldOpnd->getImm() == 1)
      return true;
    break;
  }
  return false;
}

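// Wrapper around createDPPInst that implements the second combining rule:
// when the combined bound_ctrl will be off and the old value is a known
// immediate, it must be the identity value for OrigMI's operation, and src1
// (which must be a register of the same class as the DPP mov's result)
// becomes the combined old operand.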
MachineInstr *GCNDPPCombine::createDPPInst(
    MachineInstr &OrigMI, MachineInstr &MovMI, RegSubRegPair CombOldVGPR,
    MachineOperand *OldOpndValue, bool CombBCZ, bool IsShrinkable) const {
  assert(CombOldVGPR.Reg);
  if (!CombBCZ && OldOpndValue && OldOpndValue->isImm()) {
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (!Src1 || !Src1->isReg()) {
      LLVM_DEBUG(dbgs() << "  failed: no src1 or it isn't a register\n");
      return nullptr;
    }
    if (!isIdentityValue(OrigMI.getOpcode(), OldOpndValue)) {
      LLVM_DEBUG(dbgs() << "  failed: old immediate isn't an identity\n");
      return nullptr;
    }
    CombOldVGPR = getRegSubRegPair(*Src1);
    auto MovDst = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
    const TargetRegisterClass *RC = MRI->getRegClass(MovDst->getReg());
    if (!isOfRegClass(CombOldVGPR, *RC, *MRI)) {
      LLVM_DEBUG(dbgs() << "  failed: src1 has wrong register class\n");
      return nullptr;
    }
  }
  return createDPPInst(OrigMI, MovMI, CombOldVGPR, CombBCZ, IsShrinkable);
}

// Returns true if MI does not have an OpndName immediate operand, or if that
// operand (masked by Mask) equals Value.
bool GCNDPPCombine::hasNoImmOrEqual(MachineInstr &MI, unsigned OpndName,
                                    int64_t Value, int64_t Mask) const {
  auto *Imm = TII->getNamedOperand(MI, OpndName);
  if (!Imm)
    return true;

  assert(Imm->isImm());
  return (Imm->getImm() & Mask) == Value;
}

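// Tries to fold MovMI (a DPP mov) into every VALU use of its result, looking
// through REG_SEQUENCE copies and commuting uses that read the DPP value as
// src1. On success the original instructions are erased and true is returned;
// otherwise all newly created DPP instructions are erased instead.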
bool GCNDPPCombine::combineDPPMov(MachineInstr &MovMI) const {
  assert(MovMI.getOpcode() == AMDGPU::V_MOV_B32_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp ||
         MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
  LLVM_DEBUG(dbgs() << "\nDPP combine: " << MovMI);

  auto *DstOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::vdst);
  assert(DstOpnd && DstOpnd->isReg());
  auto DPPMovReg = DstOpnd->getReg();
  if (DPPMovReg.isPhysical()) {
    LLVM_DEBUG(dbgs() << "  failed: dpp move writes physreg\n");
    return false;
  }
  if (execMayBeModifiedBeforeAnyUse(*MRI, DPPMovReg, MovMI)) {
    LLVM_DEBUG(dbgs() << "  failed: EXEC mask should remain the same"
                         " for all uses\n");
    return false;
  }

  if (MovMI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
      MovMI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
    auto *DppCtrl = TII->getNamedOperand(MovMI, AMDGPU::OpName::dpp_ctrl);
    assert(DppCtrl && DppCtrl->isImm());
    if (!AMDGPU::isLegalDPALU_DPPControl(DppCtrl->getImm())) {
      LLVM_DEBUG(dbgs() << "  failed: 64 bit dpp move uses unsupported"
                           " control value\n");
      // Let it split, then control may become legal.
      return false;
    }
  }

  auto *RowMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::row_mask);
  assert(RowMaskOpnd && RowMaskOpnd->isImm());
  auto *BankMaskOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bank_mask);
  assert(BankMaskOpnd && BankMaskOpnd->isImm());
  const bool MaskAllLanes = RowMaskOpnd->getImm() == 0xF &&
                            BankMaskOpnd->getImm() == 0xF;

  auto *BCZOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::bound_ctrl);
  assert(BCZOpnd && BCZOpnd->isImm());
  bool BoundCtrlZero = BCZOpnd->getImm();

  auto *OldOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::old);
  auto *SrcOpnd = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);
  assert(OldOpnd && OldOpnd->isReg());
  assert(SrcOpnd && SrcOpnd->isReg());
  if (OldOpnd->getReg().isPhysical() || SrcOpnd->getReg().isPhysical()) {
    LLVM_DEBUG(dbgs() << "  failed: dpp move reads physreg\n");
    return false;
  }

  auto * const OldOpndValue = getOldOpndValue(*OldOpnd);
  // OldOpndValue is either undef (IMPLICIT_DEF) or immediate or something else
  // We could use: assert(!OldOpndValue || OldOpndValue->isImm())
  // but the third option is used to distinguish undef from non-immediate
  // to reuse IMPLICIT_DEF instruction later
  assert(!OldOpndValue || OldOpndValue->isImm() || OldOpndValue == OldOpnd);

  bool CombBCZ = false;

  if (MaskAllLanes && BoundCtrlZero) { // [1]
    CombBCZ = true;
  } else {
    if (!OldOpndValue || !OldOpndValue->isImm()) {
      LLVM_DEBUG(dbgs() << "  failed: the DPP mov isn't combinable\n");
      return false;
    }

    if (OldOpndValue->getImm() == 0) {
      if (MaskAllLanes) {
        assert(!BoundCtrlZero); // by check [1]
        CombBCZ = true;
      }
    } else if (BoundCtrlZero) {
      assert(!MaskAllLanes); // by check [1]
      LLVM_DEBUG(dbgs() <<
        "  failed: old!=0 and bctrl:0 and not all lanes isn't combinable\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "  old=";
    if (!OldOpndValue)
      dbgs() << "undef";
    else
      dbgs() << *OldOpndValue;
    dbgs() << ", bound_ctrl=" << CombBCZ << '\n');

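  // Keep the newly built DPP instructions and the instructions they replace in
  // separate lists so that exactly one of the two sets can be erased at the
  // end, and remember REG_SEQUENCE users whose forwarded operands may need to
  // be marked undef.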
  SmallVector<MachineInstr*, 4> OrigMIs, DPPMIs;
  DenseMap<MachineInstr*, SmallVector<unsigned, 4>> RegSeqWithOpNos;
  auto CombOldVGPR = getRegSubRegPair(*OldOpnd);
  // Try to reuse the previous old reg if it is undefined (IMPLICIT_DEF).
  if (CombBCZ && OldOpndValue) { // CombOldVGPR should be undef
    const TargetRegisterClass *RC = MRI->getRegClass(DPPMovReg);
    CombOldVGPR = RegSubRegPair(
      MRI->createVirtualRegister(RC));
    auto UndefInst = BuildMI(*MovMI.getParent(), MovMI, MovMI.getDebugLoc(),
                             TII->get(AMDGPU::IMPLICIT_DEF), CombOldVGPR.Reg);
    DPPMIs.push_back(UndefInst.getInstr());
  }

  OrigMIs.push_back(&MovMI);
  bool Rollback = true;
  SmallVector<MachineOperand*, 16> Uses;

  for (auto &Use : MRI->use_nodbg_operands(DPPMovReg)) {
    Uses.push_back(&Use);
  }

  while (!Uses.empty()) {
    MachineOperand *Use = Uses.pop_back_val();
    Rollback = true;

    auto &OrigMI = *Use->getParent();
    LLVM_DEBUG(dbgs() << "  try: " << OrigMI);

    auto OrigOp = OrigMI.getOpcode();
    assert((TII->get(OrigOp).getSize() != 4 || !AMDGPU::isTrue16Inst(OrigOp)) &&
           "There should not be e32 True16 instructions pre-RA");
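    // A REG_SEQUENCE forwards the DPP value into a subregister of a wider
    // register; queue the uses of that subregister as if they read the DPP
    // mov directly.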
    if (OrigOp == AMDGPU::REG_SEQUENCE) {
      Register FwdReg = OrigMI.getOperand(0).getReg();
      unsigned FwdSubReg = 0;

      if (execMayBeModifiedBeforeAnyUse(*MRI, FwdReg, OrigMI)) {
        LLVM_DEBUG(dbgs() << "  failed: EXEC mask should remain the same"
                             " for all uses\n");
        break;
      }

      unsigned OpNo, E = OrigMI.getNumOperands();
      for (OpNo = 1; OpNo < E; OpNo += 2) {
        if (OrigMI.getOperand(OpNo).getReg() == DPPMovReg) {
          FwdSubReg = OrigMI.getOperand(OpNo + 1).getImm();
          break;
        }
      }

      if (!FwdSubReg)
        break;

      for (auto &Op : MRI->use_nodbg_operands(FwdReg)) {
        if (Op.getSubReg() == FwdSubReg)
          Uses.push_back(&Op);
      }
      RegSeqWithOpNos[&OrigMI].push_back(OpNo);
      continue;
    }

    bool IsShrinkable = isShrinkable(OrigMI);
    if (!(IsShrinkable ||
          ((TII->isVOP3P(OrigOp) || TII->isVOPC(OrigOp) ||
            TII->isVOP3(OrigOp)) &&
           ST->hasVOP3DPP()) ||
          TII->isVOP1(OrigOp) || TII->isVOP2(OrigOp))) {
      LLVM_DEBUG(dbgs() << "  failed: not VOP1/2/3/3P/C\n");
      break;
    }
    if (OrigMI.modifiesRegister(AMDGPU::EXEC, ST->getRegisterInfo())) {
      LLVM_DEBUG(dbgs() << "  failed: can't combine v_cmpx\n");
      break;
    }

    auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);
    auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
      LLVM_DEBUG(dbgs() << "  failed: no suitable operands\n");
      break;
    }

    auto *Src2 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src2);
    assert(Src0 && "Src1 without Src0?");
    if ((Use == Src0 && ((Src1 && Src1->isIdenticalTo(*Src0)) ||
                         (Src2 && Src2->isIdenticalTo(*Src0)))) ||
        (Use == Src1 && (Src1->isIdenticalTo(*Src0) ||
                         (Src2 && Src2->isIdenticalTo(*Src1))))) {
      LLVM_DEBUG(
          dbgs()
          << "  " << OrigMI
          << "  failed: DPP register is used more than once per instruction\n");
      break;
    }

    LLVM_DEBUG(dbgs() << "  combining: " << OrigMI);
    if (Use == Src0) {
      if (auto *DPPInst = createDPPInst(OrigMI, MovMI, CombOldVGPR,
                                        OldOpndValue, CombBCZ, IsShrinkable)) {
        DPPMIs.push_back(DPPInst);
        Rollback = false;
      }
    } else {
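      // The DPP value is read as src1; clone the instruction and commute the
      // clone so that the DPP value moves to src0.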
      assert(Use == Src1 && OrigMI.isCommutable()); // by check [1]
      auto *BB = OrigMI.getParent();
      auto *NewMI = BB->getParent()->CloneMachineInstr(&OrigMI);
      BB->insert(OrigMI, NewMI);
      if (TII->commuteInstruction(*NewMI)) {
        LLVM_DEBUG(dbgs() << "  commuted:  " << *NewMI);
        if (auto *DPPInst =
                createDPPInst(*NewMI, MovMI, CombOldVGPR, OldOpndValue, CombBCZ,
                              IsShrinkable)) {
          DPPMIs.push_back(DPPInst);
          Rollback = false;
        }
      } else
        LLVM_DEBUG(dbgs() << "  failed: cannot be commuted\n");
      NewMI->eraseFromParent();
    }
    if (Rollback)
      break;
    OrigMIs.push_back(&OrigMI);
  }

  Rollback |= !Uses.empty();

  for (auto *MI : *(Rollback ? &DPPMIs : &OrigMIs))
    MI->eraseFromParent();

  if (!Rollback) {
    for (auto &S : RegSeqWithOpNos) {
      if (MRI->use_nodbg_empty(S.first->getOperand(0).getReg())) {
        S.first->eraseFromParent();
        continue;
      }
      while (!S.second.empty())
        S.first->getOperand(S.second.pop_back_val()).setIsUndef();
    }
  }

  return !Rollback;
}

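// Walks each block bottom-up; 32-bit DPP movs are combined directly, while
// 64-bit DPP movs are combined as a whole when possible on subtargets with a
// 64-bit DPP ALU, and are otherwise split into two 32-bit halves that are
// combined individually.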
bool GCNDPPCombine::runOnMachineFunction(MachineFunction &MF) {
  ST = &MF.getSubtarget<GCNSubtarget>();
  if (!ST->hasDPP() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = ST->getInstrInfo();

  bool Changed = false;
  for (auto &MBB : MF) {
    for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(MBB))) {
      if (MI.getOpcode() == AMDGPU::V_MOV_B32_dpp && combineDPPMov(MI)) {
        Changed = true;
        ++NumDPPMovsCombined;
      } else if (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO ||
                 MI.getOpcode() == AMDGPU::V_MOV_B64_dpp) {
        if (ST->hasDPALU_DPP() && combineDPPMov(MI)) {
          Changed = true;
          ++NumDPPMovsCombined;
        } else {
          auto Split = TII->expandMovDPP64(MI);
          for (auto *M : {Split.first, Split.second}) {
            if (M && combineDPPMov(*M))
              ++NumDPPMovsCombined;
          }
          Changed = true;
        }
      }
    }
  }
  return Changed;
}