//===- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %0, 16, %1
///   V_ADD_I32_e32 %2, %0, %3
///   V_LSHLREV_B32_e32 %4, 16, %2
///
/// Replace:
///   V_ADD_I32_sdwa %4, %1, %3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_map>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"
STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;
class SDWADstOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  using SDWAOperandsVector = SmallVector<SDWAOperand *, 4>;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  MapVector<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineBasicBlock &MBB);
  std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
  bool isConvertibleToSDWA(MachineInstr &MI, const GCNSubtarget &ST) const;
  void pseudoOpConvertToVOP2(MachineInstr &MI,
                             const GCNSubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in the converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() = default;

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }

  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void print(raw_ostream& OS) const = 0;
  void dump() const { print(dbgs()); }
#endif
};

using namespace AMDGPU::SDWA;

class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp),
        SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:
  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
    : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

class SDWADstPreserveOperand : public SDWADstOperand {
private:
  MachineOperand *Preserve;

public:
  SDWADstPreserveOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                         MachineOperand *PreserveOp, SdwaSel DstSel_ = DWORD)
      : SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
        Preserve(PreserveOp) {}

  bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  MachineOperand *getPreservedOperand() const { return Preserve; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream& OS) const override;
#endif
};

} // end anonymous namespace

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static raw_ostream& operator<<(raw_ostream &OS, SdwaSel Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD:  OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

LLVM_DUMP_METHOD
void SDWASrcOperand::print(raw_ostream& OS) const {
  OS << "SDWA src: " << *getTargetOperand()
    << " src_sel:" << getSrcSel()
    << " abs:" << getAbs() << " neg:" << getNeg()
    << " sext:" << getSext() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstOperand::print(raw_ostream& OS) const {
  OS << "SDWA dst: " << *getTargetOperand()
    << " dst_sel:" << getDstSel()
    << " dst_unused:" << getDstUnused() << '\n';
}

LLVM_DUMP_METHOD
void SDWADstPreserveOperand::print(raw_ostream& OS) const {
  OS << "SDWA preserve dst: " << *getTargetOperand()
    << " dst_sel:" << getDstSel()
    << " preserve:" << *getPreservedOperand() << '\n';
}

#endif

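/// Copy register number, subregister index, and liveness flags (undef, plus
/// kill for uses or dead for defs) from \p From to \p To.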
static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

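/// If \p Reg is a def and all non-debug uses read the same (reg, subreg) pair
/// from a single instruction, return one of those uses; otherwise nullptr.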
static MachineOperand *findSingleRegUse(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg() || !Reg->isDef())
    return nullptr;

  MachineOperand *ResMO = nullptr;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(Reg->getReg())) {
    // If there exists a use of a subreg of Reg then return nullptr
    if (!isSameReg(UseMO, *Reg))
      return nullptr;

    // Check that there is only one instruction that uses Reg
    if (!ResMO) {
      ResMO = &UseMO;
    } else if (ResMO->getParent() != UseMO.getParent()) {
      return nullptr;
    }
  }

  return ResMO;
}

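/// Return the explicit def operand of \p Reg's unique defining instruction,
/// or nullptr if the def is not unique or is only implicit.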
static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
                                        const MachineRegisterInfo *MRI) {
  if (!Reg->isReg())
    return nullptr;

  MachineInstr *DefInstr = MRI->getUniqueVRegDef(Reg->getReg());
  if (!DefInstr)
    return nullptr;

  for (auto &DefMO : DefInstr->defs()) {
    if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
      return &DefMO;
  }

  // Ignore implicit defs.
  return nullptr;
}

uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0u;
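    // XOR rather than OR: if the instruction already negates this source,
    // the operand's neg must compose with (i.e. cancel) it.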
    Mods ^= Neg ? SISrcMods::NEG : 0u;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand, the potential instruction is the one that uses
  // the register defined by the parent instruction
  MachineOperand *PotentialMO = findSingleRegUse(getReplacedOperand(), getMRI());
  if (!PotentialMO)
    return nullptr;

  return PotentialMO->getParent();
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in the instruction that matches the replaced operand and
  // replace it with the target operand. Set the corresponding src_sel
  bool IsPreserveSrc = false;
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it could be src1
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    if (!Src ||
        !isSameReg(*Src, *getReplacedOperand())) {
      // It's possible this Src is a tied operand for UNUSED_PRESERVE. In that
      // case we can either abandon the peephole attempt or, if legal, copy
      // the target operand into the tied slot: the conversion is legal when
      // the preserve operation produces the same result because the dst
      // overwrites every bit the tied src would have contributed.
      MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
      MachineOperand *DstUnused =
        TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);

      if (Dst &&
          DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
        // This will work if the tied src is accessing WORD_0, and the dst is
        // writing WORD_1. Modifiers don't matter because all the bits that
        // would be impacted are being overwritten by the dst.
        // Any other case will not work.
        SdwaSel DstSel = static_cast<SdwaSel>(
            TII->getNamedImmOperand(MI, AMDGPU::OpName::dst_sel));
        if (DstSel == AMDGPU::SDWA::SdwaSel::WORD_1 &&
            getSrcSel() == AMDGPU::SDWA::SdwaSel::WORD_0) {
          IsPreserveSrc = true;
          auto DstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                   AMDGPU::OpName::vdst);
          auto TiedIdx = MI.findTiedOperandIdx(DstIdx);
          Src = &MI.getOperand(TiedIdx);
          SrcSel = nullptr;
          SrcMods = nullptr;
        } else {
          // Not legal to convert this src
          return false;
        }
      }
    }
    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
         !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply the src
      // operand to src2. This is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) &&
           (IsPreserveSrc || (SrcSel && SrcMods)));
  }
  copyRegOperand(*Src, *getTargetOperand());
  if (!IsPreserveSrc) {
    SrcSel->setImm(getSrcSel());
    SrcMods->setImm(getSrcMods(TII, Src));
  }
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand, the potential instruction is the one that
  // defines the register that this operand uses
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();

  MachineOperand *PotentialMO = findSingleRegDef(getReplacedOperand(), MRI);
  if (!PotentialMO)
    return nullptr;

  // Check that ParentMI is the only instruction that uses the replaced register
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(PotentialMO->getReg())) {
    if (&UseInst != ParentMI)
      return nullptr;
  }

  return PotentialMO->getParent();
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused

  if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_FMAC_F32_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allows dst_sel to be equal only to DWORD
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand &&
         Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because its register definition would
  // conflict with our new instruction
  getParentInst()->eraseFromParent();
  return true;
}

bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
                                           const SIInstrInfo *TII) {
  // MI should be moved right before v_or_b32.
  // For this we should clear all kill flags on uses of MI src-operands;
  // otherwise we can run into problems with uses of killed operands.
  for (MachineOperand &MO : MI.uses()) {
    if (!MO.isReg())
      continue;
    getMRI()->clearKillFlags(MO.getReg());
  }

  // Move MI before v_or_b32
  auto MBB = MI.getParent();
  MBB->remove(&MI);
  MBB->insert(getParentInst(), &MI);

  // Add an implicit use of the preserved register
  MachineInstrBuilder MIB(*MBB->getParent(), MI);
  MIB.addReg(getPreservedOperand()->getReg(),
             RegState::ImplicitKill,
             getPreservedOperand()->getSubReg());

  // Tie dst to the implicit use
  MI.tieOperands(AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst),
                 MI.getNumOperands() - 1);

  // Convert MI as any other SDWADstOperand and remove v_or_b32
  return SDWADstOperand::convertToSDWA(MI, TII);
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm()) {
    return Op.getImm();
  }

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.:
  // %1 = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;

      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;

      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;

      return Copied.getImm();
    }
  }

  return None;
}

std::unique_ptr<SDWAOperand>
SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_LSHLREV_B32_e64: {
    // from: v_lshrrev_b32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3

    // from: v_ashrrev_i32_e32 v1, 16/24, v0
    // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1

    // from: v_lshlrev_b32_e32 v1, 16/24, v0
    // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm)
      break;

    if (*Imm != 16 && *Imm != 24)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (Register::isPhysicalRegister(Src1->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B32_e64) {
      return std::make_unique<SDWADstOperand>(
          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
          Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
          Opcode != AMDGPU::V_LSHRREV_B32_e64);
    }
    break;
  }

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_LSHLREV_B16_e64: {
    // from: v_lshrrev_b16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1

    // from: v_ashrrev_i16_e32 v1, 8, v0
    // to SDWA src:v0 src_sel:BYTE_1 sext:1

    // from: v_lshlrev_b16_e32 v1, 8, v0
    // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    auto Imm = foldToImm(*Src0);
    if (!Imm || *Imm != 8)
      break;

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Register::isPhysicalRegister(Src1->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
        Opcode == AMDGPU::V_LSHLREV_B16_e64) {
      return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
    } else {
      return std::make_unique<SDWASrcOperand>(
            Src1, Dst, BYTE_1, false, false,
            Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
            Opcode != AMDGPU::V_LSHRREV_B16_e64);
    }
    break;
  }

  case AMDGPU::V_BFE_I32:
  case AMDGPU::V_BFE_U32: {
    // e.g.:
    // from: v_bfe_u32 v1, v0, 8, 8
    // to SDWA src:v0 src_sel:BYTE_1

    // offset | width | src_sel
    // ------------------------
    // 0      | 8     | BYTE_0
    // 0      | 16    | WORD_0
    // 0      | 32    | DWORD ?
    // 8      | 8     | BYTE_1
    // 16     | 8     | BYTE_2
    // 16     | 16    | WORD_1
    // 24     | 8     | BYTE_3

    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto Offset = foldToImm(*Src1);
    if (!Offset)
      break;

    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    auto Width = foldToImm(*Src2);
    if (!Width)
      break;

    SdwaSel SrcSel = DWORD;

    if (*Offset == 0 && *Width == 8)
      SrcSel = BYTE_0;
    else if (*Offset == 0 && *Width == 16)
      SrcSel = WORD_0;
    else if (*Offset == 0 && *Width == 32)
      SrcSel = DWORD;
    else if (*Offset == 8 && *Width == 8)
      SrcSel = BYTE_1;
    else if (*Offset == 16 && *Width == 8)
      SrcSel = BYTE_2;
    else if (*Offset == 16 && *Width == 16)
      SrcSel = WORD_1;
    else if (*Offset == 24 && *Width == 8)
      SrcSel = BYTE_3;
    else
      break;

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Register::isPhysicalRegister(Src0->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    return std::make_unique<SDWASrcOperand>(
          Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32);
  }

  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::V_AND_B32_e64: {
    // e.g.:
    // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
    // to SDWA src:v0 src_sel:WORD_0/BYTE_0

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    auto ValSrc = Src1;
    auto Imm = foldToImm(*Src0);

    if (!Imm) {
      Imm = foldToImm(*Src1);
      ValSrc = Src0;
    }

    if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
      break;

    MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

    if (Register::isPhysicalRegister(ValSrc->getReg()) ||
        Register::isPhysicalRegister(Dst->getReg()))
      break;

    return std::make_unique<SDWASrcOperand>(
        ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
  }

  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::V_OR_B32_e64: {
    // Patterns for dst_unused:UNUSED_PRESERVE.
    // e.g., from:
    // v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD
    //                           src0_sel:WORD_1 src1_sel:WORD_1
    // v_add_f16_e32 v3, v1, v2
    // v_or_b32_e32 v4, v0, v3
    // to SDWA preserve dst:v4 dst_sel:WORD_1 dst_unused:UNUSED_PRESERVE preserve:v3

    // Check if one of the operands of v_or_b32 is an SDWA instruction
    using CheckRetType = Optional<std::pair<MachineOperand *, MachineOperand *>>;
    auto CheckOROperandsForSDWA =
      [&](const MachineOperand *Op1, const MachineOperand *Op2) -> CheckRetType {
        if (!Op1 || !Op1->isReg() || !Op2 || !Op2->isReg())
          return CheckRetType(None);

        MachineOperand *Op1Def = findSingleRegDef(Op1, MRI);
        if (!Op1Def)
          return CheckRetType(None);

        MachineInstr *Op1Inst = Op1Def->getParent();
        if (!TII->isSDWA(*Op1Inst))
          return CheckRetType(None);

        MachineOperand *Op2Def = findSingleRegDef(Op2, MRI);
        if (!Op2Def)
          return CheckRetType(None);

        return CheckRetType(std::make_pair(Op1Def, Op2Def));
      };

    MachineOperand *OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    assert(OrSDWA && OrOther);
    auto Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
    if (!Res) {
      OrSDWA = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      OrOther = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      assert(OrSDWA && OrOther);
      Res = CheckOROperandsForSDWA(OrSDWA, OrOther);
      if (!Res)
        break;
    }

    MachineOperand *OrSDWADef = Res->first;
    MachineOperand *OrOtherDef = Res->second;
    assert(OrSDWADef && OrOtherDef);

    MachineInstr *SDWAInst = OrSDWADef->getParent();
    MachineInstr *OtherInst = OrOtherDef->getParent();

    // Check that OtherInst is actually bitwise compatible with SDWAInst,
    // i.e. their destination patterns don't overlap. A compatible instruction
    // can be either a regular instruction with compatible bitness or an SDWA
    // instruction with the correct dst_sel
    // SDWAInst | OtherInst bitness / OtherInst dst_sel
    // -----------------------------------------------------
    // DWORD    | no                    / no
    // WORD_0   | no                    / BYTE_2/3, WORD_1
    // WORD_1   | 8/16-bit instructions / BYTE_0/1, WORD_0
    // BYTE_0   | no                    / BYTE_1/2/3, WORD_1
    // BYTE_1   | 8-bit                 / BYTE_0/2/3, WORD_1
    // BYTE_2   | 8/16-bit              / BYTE_0/1/3, WORD_0
    // BYTE_3   | 8/16/24-bit           / BYTE_0/1/2, WORD_0
    // E.g. if SDWAInst is v_add_f16_sdwa dst_sel:WORD_1 then v_add_f16 is OK
    // but v_add_f32 is not.

    // TODO: add support for non-SDWA instructions as OtherInst.
    // For now this only works with SDWA instructions. For regular instructions
    // there is no way to determine if the instruction writes only 8/16/24 bits
    // out of the full register size, and all registers are at least 32 bits
    // wide.
    if (!TII->isSDWA(*OtherInst))
      break;

    SdwaSel DstSel = static_cast<SdwaSel>(
      TII->getNamedImmOperand(*SDWAInst, AMDGPU::OpName::dst_sel));
    SdwaSel OtherDstSel = static_cast<SdwaSel>(
      TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_sel));

    bool DstSelAgree = false;
    switch (DstSel) {
    case WORD_0: DstSelAgree = ((OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case WORD_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_0: DstSelAgree = ((OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_1: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_1));
      break;
    case BYTE_2: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_3) ||
                                (OtherDstSel == WORD_0));
      break;
    case BYTE_3: DstSelAgree = ((OtherDstSel == BYTE_0) ||
                                (OtherDstSel == BYTE_1) ||
                                (OtherDstSel == BYTE_2) ||
                                (OtherDstSel == WORD_0));
      break;
    default: DstSelAgree = false;
    }

    if (!DstSelAgree)
      break;

    // OtherInst's dst_unused should also be UNUSED_PAD
    DstUnused OtherDstUnused = static_cast<DstUnused>(
      TII->getNamedImmOperand(*OtherInst, AMDGPU::OpName::dst_unused));
    if (OtherDstUnused != DstUnused::UNUSED_PAD)
      break;

    // Create DstPreserveOperand
    MachineOperand *OrDst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
    assert(OrDst && OrDst->isReg());

    return std::make_unique<SDWADstPreserveOperand>(
      OrDst, OrSDWADef, OrOtherDef, DstSel);
  }
  }

  return std::unique_ptr<SDWAOperand>(nullptr);
}

#if !defined(NDEBUG)
static raw_ostream& operator<<(raw_ostream &OS, const SDWAOperand &Operand) {
  Operand.print(OS);
  return OS;
}
#endif

void SIPeepholeSDWA::matchSDWAOperands(MachineBasicBlock &MBB) {
  for (MachineInstr &MI : MBB) {
    if (auto Operand = matchSDWAOperand(MI)) {
      LLVM_DEBUG(dbgs() << "Match: " << MI << "To: " << *Operand << '\n');
      SDWAOperands[&MI] = std::move(Operand);
      ++NumSDWAPatternsFound;
    }
  }
}

// Convert the V_ADDC_U32_e64 into V_ADDC_U32_e32, and
// V_ADD_I32_e64 into V_ADD_I32_e32, so that isConvertibleToSDWA
// can then transform V_ADD_I32_e32 into V_ADD_I32_sdwa.
//
// We are transforming from a VOP3 into a VOP2 form of the instruction.
//   %19:vgpr_32 = V_AND_B32_e32 255,
//       killed %16:vgpr_32, implicit $exec
//   %47:vgpr_32, %49:sreg_64_xexec = V_ADD_I32_e64
//       %26.sub0:vreg_64, %19:vgpr_32, implicit $exec
//   %48:vgpr_32, dead %50:sreg_64_xexec = V_ADDC_U32_e64
//       %26.sub1:vreg_64, %54:vgpr_32, killed %49:sreg_64_xexec, implicit $exec
//
// becomes
//   %47:vgpr_32 = V_ADD_I32_sdwa
//       0, %26.sub0:vreg_64, 0, killed %16:vgpr_32, 0, 6, 0, 6, 0,
//       implicit-def $vcc, implicit $exec
//   %48:vgpr_32 = V_ADDC_U32_e32
//       0, %26.sub1:vreg_64, implicit-def $vcc, implicit $vcc, implicit $exec
void SIPeepholeSDWA::pseudoOpConvertToVOP2(MachineInstr &MI,
                                           const GCNSubtarget &ST) const {
  int Opc = MI.getOpcode();
  assert((Opc == AMDGPU::V_ADD_I32_e64 || Opc == AMDGPU::V_SUB_I32_e64) &&
         "Currently only handles V_ADD_I32_e64 or V_SUB_I32_e64");

  // Can the candidate MI be shrunk?
  if (!TII->canShrink(MI, *MRI))
    return;
  Opc = AMDGPU::getVOPe32(Opc);
  // Find the related ADD instruction.
  const MachineOperand *Sdst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Sdst)
    return;
  MachineOperand *NextOp = findSingleRegUse(Sdst, MRI);
  if (!NextOp)
    return;
  MachineInstr &MISucc = *NextOp->getParent();
  // Can the successor be shrunk?
  if (!TII->canShrink(MISucc, *MRI))
    return;
  int SuccOpc = AMDGPU::getVOPe32(MISucc.getOpcode());
  // Make sure the carry in/out are subsequently unused.
  MachineOperand *CarryIn = TII->getNamedOperand(MISucc, AMDGPU::OpName::src2);
  if (!CarryIn)
    return;
  MachineOperand *CarryOut = TII->getNamedOperand(MISucc, AMDGPU::OpName::sdst);
  if (!CarryOut)
    return;
  if (!MRI->hasOneUse(CarryIn->getReg()) || !MRI->use_empty(CarryOut->getReg()))
    return;
  // Make sure VCC or its subregs are dead before MI.
  MachineBasicBlock &MBB = *MI.getParent();
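  // The last argument is the number of neighboring instructions to scan; if
  // liveness cannot be determined within that window, LQR_Unknown is returned
  // and we conservatively bail out below.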
  auto Liveness = MBB.computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 25);
  if (Liveness != MachineBasicBlock::LQR_Dead)
    return;
  // Check if VCC is referenced in the range between MI and MISucc.
  for (auto I = std::next(MI.getIterator()), E = MISucc.getIterator();
       I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::VCC, TRI))
      return;
  }

  // Make the two new e32 instruction variants.
  // Replace MI with V_{SUB|ADD}_I32_e32
  BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opc))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::vdst))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0))
    .add(*TII->getNamedOperand(MI, AMDGPU::OpName::src1))
    .setMIFlags(MI.getFlags());

  MI.eraseFromParent();

  // Replace MISucc with V_{SUBB|ADDC}_U32_e32
  BuildMI(MBB, MISucc, MISucc.getDebugLoc(), TII->get(SuccOpc))
    .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::vdst))
    .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src0))
    .add(*TII->getNamedOperand(MISucc, AMDGPU::OpName::src1))
    .setMIFlags(MISucc.getFlags());

  MISucc.eraseFromParent();
}

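// Check whether MI can be converted (possibly after shrinking to its VOP2
// form) to an SDWA instruction that is valid on the subtarget ST.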
bool SIPeepholeSDWA::isConvertibleToSDWA(MachineInstr &MI,
                                         const GCNSubtarget &ST) const {
  // Check if this is already an SDWA instruction
  unsigned Opc = MI.getOpcode();
  if (TII->isSDWA(Opc))
    return true;

  // Check if this instruction has an opcode that supports SDWA
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && (SDst->getReg() != AMDGPU::VCC &&
                   SDst->getReg() != AMDGPU::VCC_LO))
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_FMAC_F16_e32 ||
                           Opc == AMDGPU::V_FMAC_F32_e32 ||
                           Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  // Check if the target supports this SDWA opcode
  if (TII->pseudoToMCOpcode(Opc) == -1)
    return false;

  // FIXME: has SDWA but requires handling of implicit VCC use
  if (Opc == AMDGPU::V_CNDMASK_B32_e32)
    return false;

  return true;
}

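// Rebuild MI as its SDWA counterpart: copy the operands both forms share,
// initialize the SDWA-only sel/unused operands to neutral defaults, then let
// the matched SDWA operands rewrite the new instruction in place.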
bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {
  LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);

  // Convert to sdwa
  int SDWAOpcode;
  unsigned Opcode = MI.getOpcode();
  if (TII->isSDWA(Opcode)) {
    SDWAOpcode = Opcode;
  } else {
    SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
    if (SDWAOpcode == -1)
      SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
  }
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create the SDWA version of instruction MI and initialize its operands
  MachineInstrBuilder SDWAInst =
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc)
    .setMIFlags(MI.getFlags());

  // Copy dst; if it is present in the original then it should also be present
  // in the SDWA instruction
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.addReg(TRI->getVCC(), RegState::Define);
  }

  // Copy src0 and initialize src0_modifiers. All SDWA instructions have src0
  // and src0_modifiers (except for v_nop_sdwa, but it can't get here)
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
    Src0 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_FMAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_FMAC_F32_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Copy dst_sel if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
    if (DstSel) {
      SDWAInst.add(*DstSel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Copy dst_unused if present, initialize otherwise if needed
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused) {
      SDWAInst.add(*DstUnused);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
    }
  }

  // Copy src0_sel if present, initialize otherwise
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  MachineOperand *Src0Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  if (Src0Sel) {
    SDWAInst.add(*Src0Sel);
  } else {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Copy src1_sel if present, initialize otherwise if needed
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    MachineOperand *Src1Sel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    if (Src1Sel) {
      SDWAInst.add(*Src1Sel);
    } else {
      SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
    }
  }

  // Check for a preserved register that needs to be copied.
  auto DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  if (DstUnused &&
      DstUnused->getImm() == AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) {
    // We expect, if we are here, that the instruction was already in its SDWA
    // form, with a tied operand.
    assert(Dst && Dst->isTied());
    assert(Opcode == static_cast<unsigned int>(SDWAOpcode));
    // We also expect a vdst, since sdst can't preserve.
    auto PreserveDstIdx = AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst);
    assert(PreserveDstIdx != -1);

    auto TiedIdx = MI.findTiedOperandIdx(PreserveDstIdx);
    auto Tied = MI.getOperand(TiedIdx);

    SDWAInst.add(Tied);
    SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
  }

  // Apply all SDWA operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
    // There should be no intersection between SDWA operands and potential MIs
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (which was already destroyed). So if an SDWAOperand is also
    // a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// An instruction converted to SDWA should not have immediate or SGPR operands
// (one SGPR operand is allowed on GFX9). Copy its scalar operands into VGPRs.
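// E.g. (hypothetical virtual registers), an illegal SGPR source
//   %2:vgpr_32 = V_ADD_I32_sdwa %0:vgpr_32, %1:sgpr_32, ...
// becomes
//   %3:vgpr_32 = V_MOV_B32_e32 %1:sgpr_32, implicit $exec
//   %2:vgpr_32 = V_ADD_I32_sdwa %0:vgpr_32, %3:vgpr_32, ...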
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const GCNSubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    Register VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  bool Ret = false;
  for (MachineBasicBlock &MBB : MF) {
    bool Changed = false;
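    // Iterate to a fixed point: converting an instruction can expose new
    // SDWA patterns among its neighbors.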
    do {
      // Preprocess the ADD/SUB pairs so they can be SDWA'ed.
      // Look for a possible ADD or SUB that resulted from a previously lowered
      // V_{ADD|SUB}_U64_PSEUDO. The function pseudoOpConvertToVOP2
      // lowers the pair of instructions into e32 form.
      matchSDWAOperands(MBB);
      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI &&
           (PotentialMI->getOpcode() == AMDGPU::V_ADD_I32_e64 ||
            PotentialMI->getOpcode() == AMDGPU::V_SUB_I32_e64))
          pseudoOpConvertToVOP2(*PotentialMI, ST);
      }
      SDWAOperands.clear();

      // Generate the potential match list.
      matchSDWAOperands(MBB);

      for (const auto &OperandPair : SDWAOperands) {
        const auto &Operand = OperandPair.second;
        MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
        if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
          PotentialMatches[PotentialMI].push_back(Operand.get());
        }
      }

      for (auto &PotentialPair : PotentialMatches) {
        MachineInstr &PotentialMI = *PotentialPair.first;
        convertToSDWA(PotentialMI, PotentialPair.second);
      }

      PotentialMatches.clear();
      SDWAOperands.clear();

      Changed = !ConvertedInstructions.empty();

      if (Changed)
        Ret = true;
      while (!ConvertedInstructions.empty())
        legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);
    } while (Changed);
  }

  return Ret;
}