//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

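// A pending fold of one operand into a use instruction: records the use
// instruction and operand index to rewrite, what kind of value (register,
// immediate, frame index, or global address) will be substituted, and whether
// the use had to be commuted or shrunk to a 32-bit encoding to be legal.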
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned Opc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

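// Rewrite the use operand recorded in \p Fold in place: apply op_sel tricks
// for foldable packed 16-bit literals, shrink carry-out add/sub instructions
// to their 32-bit encoding when requested, or simply change the operand to the
// folded immediate, frame index, global address, or register.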
static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
                                      ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
        // Only apply the following transformation if that operand requires
        // a packed immediate.
        switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
        case AMDGPU::OPERAND_REG_IMM_V2FP16:
        case AMDGPU::OPERAND_REG_IMM_V2INT16:
        case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
        case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
          // If upper part is all zero we do not need op_sel_hi.
          if (!isUInt<16>(Fold.ImmToFold)) {
            if (!(Fold.ImmToFold & 0xffff)) {
              Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
              Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
              Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
              return true;
            }
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
            return true;
          }
          break;
        default:
          break;
        }
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    // FIXME: ChangeToImmediate should probably clear the subreg flags. It's
    // reinterpreted as TargetFlags.
    Old.setSubReg(0);
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

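// Record a new fold candidate for \p MI's operand \p OpNo unless a fold into
// that same operand has already been queued.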
static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n  " << *MI << '\n');
  FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

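// Try to queue folding \p OpToFold into operand \p OpNo of \p MI. If the
// operand is not legal as-is, this tries rewriting MAC/FMAC to MAD/FMA,
// converting s_setreg_b32 to its immediate form, or commuting the instruction
// (possibly shrinking a carry-out VOP2 to its 32-bit form) to make it legal.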
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    const SIRegisterInfo &SRI = TII->getRegisterInfo();

    // Fine if the operand can be encoded as an inline constant
    if (OpToFold->isImm()) {
      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
          !TII->isInlineConstant(*OpToFold, OpInfo)) {
        // Otherwise check for another constant
        for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
          auto &Op = MI->getOperand(i);
          if (OpNo != i &&
              TII->isLiteralConstantLike(Op, OpInfo)) {
            return false;
          }
        }
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

// Find a def of the UseReg, check if it is a reg_sequence, and find the
// initializer for each subreg, tracking it down to a foldable inline immediate
// if possible. Returns true on success.
static bool getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy,
    const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
  MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && !Sub->getSubReg() &&
         TII->isFoldableCopy(*SubDef);
         SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg())
        break;
      Sub = Op;
    }

    Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
  }

  return true;
}

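// Try to fold \p OpToFold into an operand whose type accepts AGPR inline
// constants. This succeeds when OpToFold is an inline-constant immediate, or
// when it is a register defined by a REG_SEQUENCE whose initializers are all
// the same inline constant (a splat), in which case the fold is queued on
// \p FoldList.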
static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

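// Attempt to fold \p OpToFold into the use at operand index \p UseOpIdx of
// \p UseMI. Depending on the use, this recurses through REG_SEQUENCE users,
// folds frame indices into scratch addressing, turns COPYs into mov or
// accvgpr instructions, rewrites readfirstlane/readlane of constants or
// SGPRs, and otherwise queues the fold on \p FoldList.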
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // Ensure this is either relative to the current frame or the current wave.
    MachineOperand &SOff =
        *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if ((!SOff.isReg() || SOff.getReg() != MFI->getStackPtrOffsetReg()) &&
        (!SOff.isImm() || SOff.getImm() != 0))
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());

    // If this is relative to the current wave, update it to be relative to the
    // current frame.
    if (SOff.isImm())
      SOff.ChangeToRegister(MFI->getStackPtrOffsetReg(), false);
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();

    // Don't fold into a copy to a physical register. Doing so would interfere
    // with the register coalescer's logic, which would avoid redundant
    // initializations.
    if (DestReg.isPhysical())
      return;

    const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (SrcReg.isVirtual()) { // XXX - This can be an assert?
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // It is very tricky to store a value into an AGPR. v_accvgpr_write_b32
      // can only accept VGPR or inline immediate. Recreate a reg_sequence with
      // its initializers right here, so we will rematerialize immediates and
      // avoid copies via different reg classes.
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
                        *MRI)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->RemoveOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers, they
              // must be copied. Better to do it here before copyPhysReg()
              // creates several reads to do the AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // Direct copy from SGPR to AGPR is not possible. To avoid creation
            // of exploded copies SGPR->VGPR->AGPR in the copyPhysReg() later,
            // create a copy here and track if we already have such a copy.
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
        return;
      }

      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities.  The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

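// Constant-fold a two-source 32-bit bitwise or shift instruction. Returns true
// and sets \p Result if \p Opcode is one of the handled opcodes.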
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::S_XNOR_B32:
    Result = ~(LHS ^ RHS);
    return true;
  case AMDGPU::S_NAND_B32:
    Result = ~(LHS & RHS);
    return true;
  case AMDGPU::S_NOR_B32:
    Result = ~(LHS | RHS);
    return true;
  case AMDGPU::S_ANDN2_B32:
    Result = LHS & ~RHS;
    return true;
  case AMDGPU::S_ORN2_B32:
    Result = LHS | ~RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

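// If \p Op is a virtual register defined by a move-immediate, return the
// immediate source operand of that def; otherwise return \p Op itself.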
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32 ||
      MI->getOpcode() == AMDGPU::V_LSHL_ADD_U32 ||
      MI->getOpcode() == AMDGPU::V_AND_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      // v_lshl_add_u32 0, X, Y -> copy Y
      // v_lshl_add_u32 0, X, K -> v_mov_b32 K
      // v_and_or_b32 0, X, Y -> copy Y
      // v_and_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

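// Fold the source of a foldable mov/copy \p MI (an immediate, frame index,
// global address, or register held in \p OpToFold) into the uses of its
// destination register, then commit the collected fold candidates.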
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    assert(!Fold.isReg() || Fold.OpToFold);
    if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restoring instruction's original operand order if fold has failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

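// Map a multiplication constant (0.5, 2.0, or 4.0 in the float encoding used
// by \p Opc) to the corresponding output-modifier value.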
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

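// If \p MI is a multiply or add recognized by isOMod and its register source
// has a single non-debug use, fold the operation into the defining
// instruction's output modifier and erase \p MI.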
// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

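// Pass entry point: walk every block in depth-first order, simplifying
// instructions, folding the sources of foldable copies and movs into their
// uses, eliminating redundant m0 writes, and folding clamp and omod modifiers
// into defining instructions.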
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;

    MachineOperand *CurrentKnownM0Val = nullptr;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;

        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);

        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so
      // we can erase the later ones.
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}