//===- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-sink"

static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

static cl::opt<bool>
UseBlockFreqInfo("machine-sink-bfi",
           cl::desc("Use block frequency info to find successors to sink"),
           cl::init(true), cl::Hidden);

static cl::opt<unsigned> SplitEdgeProbabilityThreshold(
    "machine-sink-split-probability-threshold",
    cl::desc(
80        "Percentage threshold for splitting single-instruction critical edge. "
81        "If the branch threshold is higher than this threshold, we allow "
82        "speculative execution of up to 1 instruction to avoid branching to "
83        "splitted critical edge"),
84    cl::init(40), cl::Hidden);
85
86static cl::opt<unsigned> SinkLoadInstsPerBlockThreshold(
87    "machine-sink-load-instrs-threshold",
88    cl::desc("Do not try to find alias store for a load if there is a in-path "
89             "block whose instruction number is higher than this threshold."),
90    cl::init(2000), cl::Hidden);
91
92static cl::opt<unsigned> SinkLoadBlocksThreshold(
93    "machine-sink-load-blocks-threshold",
94    cl::desc("Do not try to find alias store for a load if the block number in "
95             "the straight line is higher than this threshold."),
96    cl::init(20), cl::Hidden);
97
98static cl::opt<bool>
99    SinkInstsIntoCycle("sink-insts-to-avoid-spills",
100                       cl::desc("Sink instructions into cycles to avoid "
101                                "register spills"),
102                       cl::init(false), cl::Hidden);
103
104static cl::opt<unsigned> SinkIntoCycleLimit(
105    "machine-sink-cycle-limit",
106    cl::desc("The maximum number of instructions considered for cycle sinking."),
107    cl::init(50), cl::Hidden);
108
109STATISTIC(NumSunk,      "Number of machine instructions sunk");
110STATISTIC(NumCycleSunk,  "Number of machine instructions sunk into a cycle");
111STATISTIC(NumSplit,     "Number of critical edges split");
112STATISTIC(NumCoalesces, "Number of copies coalesced");
113STATISTIC(NumPostRACopySink, "Number of copies sunk after RA");
114
115namespace {
116
117  class MachineSinking : public MachineFunctionPass {
118    const TargetSubtargetInfo *STI = nullptr;
119    const TargetInstrInfo *TII = nullptr;
120    const TargetRegisterInfo *TRI = nullptr;
121    MachineRegisterInfo *MRI = nullptr;      // Machine register information
122    MachineDominatorTree *DT = nullptr;      // Machine dominator tree
123    MachinePostDominatorTree *PDT = nullptr; // Machine post dominator tree
124    MachineCycleInfo *CI = nullptr;
125    MachineBlockFrequencyInfo *MBFI = nullptr;
126    const MachineBranchProbabilityInfo *MBPI = nullptr;
127    AliasAnalysis *AA = nullptr;
128    RegisterClassInfo RegClassInfo;
129
130    // Remember which edges have been considered for breaking.
131    SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
132    CEBCandidates;
    // Remember which edges we are about to split.
    // Unlike CEBCandidates, the edges in this set will actually be split.
    SetVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> ToSplit;

    DenseSet<Register> RegsToClearKillFlags;

    using AllSuccsCache =
        DenseMap<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>;

    /// DBG_VALUE pointer and flag. The flag is true if this DBG_VALUE is
    /// post-dominated by another DBG_VALUE of the same variable location.
    /// This is necessary to detect sequences such as:
    ///     %0 = someinst
    ///     DBG_VALUE %0, !123, !DIExpression()
    ///     %1 = anotherinst
    ///     DBG_VALUE %1, !123, !DIExpression()
    /// Where if %0 were to sink, the DBG_VALUE should not sink with it, as that
    /// would re-order assignments.
    using SeenDbgUser = PointerIntPair<MachineInstr *, 1>;

    /// Record of DBG_VALUE uses of vregs in a block, so that we can identify
    /// debug instructions to sink.
    SmallDenseMap<unsigned, TinyPtrVector<SeenDbgUser>> SeenDbgUsers;

    /// Record of debug variables that have had their locations set in the
    /// current block.
    DenseSet<DebugVariable> SeenDbgVars;

    DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>, bool>
        HasStoreCache;

    DenseMap<std::pair<MachineBasicBlock *, MachineBasicBlock *>,
             SmallVector<MachineInstr *>>
        StoreInstrCache;

    /// Cached BB's register pressure.
    DenseMap<const MachineBasicBlock *, std::vector<unsigned>>
        CachedRegisterPressure;

    bool EnableSinkAndFold;

  public:
    static char ID; // Pass identification

    MachineSinking() : MachineFunctionPass(ID) {
      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      MachineFunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachinePostDominatorTree>();
      AU.addRequired<MachineCycleInfoWrapperPass>();
      AU.addRequired<MachineBranchProbabilityInfo>();
      AU.addPreserved<MachineCycleInfoWrapperPass>();
      AU.addPreserved<MachineLoopInfo>();
      if (UseBlockFreqInfo)
        AU.addRequired<MachineBlockFrequencyInfo>();
      AU.addRequired<TargetPassConfig>();
    }

    void releaseMemory() override {
      CEBCandidates.clear();
    }

  private:
    bool ProcessBlock(MachineBasicBlock &MBB);
    void ProcessDbgInst(MachineInstr &MI);
    bool isWorthBreakingCriticalEdge(MachineInstr &MI,
                                     MachineBasicBlock *From,
                                     MachineBasicBlock *To);

    bool hasStoreBetween(MachineBasicBlock *From, MachineBasicBlock *To,
                         MachineInstr &MI);

    /// Postpone the splitting of the given critical
    /// edge (\p From, \p To).
    ///
    /// We do not split the edges on the fly. Indeed, this invalidates
    /// the dominance information and thus triggers a lot of updates
    /// of that information underneath.
    /// Instead, we postpone all the splits after each iteration of
    /// the main loop. That way, the information is at least valid
    /// for the lifetime of an iteration.
    ///
    /// \return True if the edge is marked as toSplit, false otherwise.
    /// False can be returned if, for instance, this is not profitable.
    bool PostponeSplitCriticalEdge(MachineInstr &MI,
                                   MachineBasicBlock *From,
                                   MachineBasicBlock *To,
                                   bool BreakPHIEdge);
    bool SinkInstruction(MachineInstr &MI, bool &SawStore,
                         AllSuccsCache &AllSuccessors);

    /// If we sink a COPY inst, some debug users of its destination may no
    /// longer be dominated by the COPY, and will eventually be dropped.
    /// This is easily rectified by forwarding the non-dominated debug uses
    /// to the copy source.
    void SalvageUnsunkDebugUsersOfCopy(MachineInstr &,
                                       MachineBasicBlock *TargetBlock);
    bool AllUsesDominatedByBlock(Register Reg, MachineBasicBlock *MBB,
                                 MachineBasicBlock *DefMBB, bool &BreakPHIEdge,
                                 bool &LocalUse) const;
    MachineBasicBlock *FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
               bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);

    void FindCycleSinkCandidates(MachineCycle *Cycle, MachineBasicBlock *BB,
                                 SmallVectorImpl<MachineInstr *> &Candidates);
    bool SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I);

    bool isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                              MachineBasicBlock *MBB,
                              MachineBasicBlock *SuccToSinkTo,
                              AllSuccsCache &AllSuccessors);

    bool PerformTrivialForwardCoalescing(MachineInstr &MI,
                                         MachineBasicBlock *MBB);

    bool PerformSinkAndFold(MachineInstr &MI, MachineBasicBlock *MBB);

    SmallVector<MachineBasicBlock *, 4> &
    GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                           AllSuccsCache &AllSuccessors) const;

    std::vector<unsigned> &getBBRegisterPressure(const MachineBasicBlock &MBB);

    bool registerPressureSetExceedsLimit(unsigned NRegs,
                                         const TargetRegisterClass *RC,
                                         const MachineBasicBlock &MBB);
  };

} // end anonymous namespace

char MachineSinking::ID = 0;

char &llvm::MachineSinkingID = MachineSinking::ID;

INITIALIZE_PASS_BEGIN(MachineSinking, DEBUG_TYPE,
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineBranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineCycleInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MachineSinking, DEBUG_TYPE,
                    "Machine code sinking", false, false)

/// Return true if a target defined block prologue instruction interferes
/// with a sink candidate.
static bool blockPrologueInterferes(const MachineBasicBlock *BB,
                                    MachineBasicBlock::const_iterator End,
                                    const MachineInstr &MI,
                                    const TargetRegisterInfo *TRI,
                                    const TargetInstrInfo *TII,
                                    const MachineRegisterInfo *MRI) {
  for (MachineBasicBlock::const_iterator PI = BB->getFirstNonPHI(); PI != End;
       ++PI) {
    // Only check target defined prologue instructions
    if (!TII->isBasicBlockPrologue(*PI))
      continue;
    for (auto &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (MO.isUse()) {
        if (Reg.isPhysical() &&
            (TII->isIgnorableUse(MO) || (MRI && MRI->isConstantPhysReg(Reg))))
          continue;
        if (PI->modifiesRegister(Reg, TRI))
          return true;
      } else {
        if (PI->readsRegister(Reg, TRI))
          return true;
        // Check for interference with non-dead defs
        auto *DefOp = PI->findRegisterDefOperand(Reg, false, true, TRI);
        if (DefOp && !DefOp->isDead())
          return true;
      }
    }
  }

  return false;
}

bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr &MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI.isCopy())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual() ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  LLVM_DEBUG(dbgs() << "Coalescing: " << *DefMI);
  LLVM_DEBUG(dbgs() << "*** to: " << MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI.eraseFromParent();

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MRI->clearKillFlags(SrcReg);

  ++NumCoalesces;
  return true;
}

bool MachineSinking::PerformSinkAndFold(MachineInstr &MI,
                                        MachineBasicBlock *MBB) {
  if (MI.isCopy() || MI.mayLoadOrStore() ||
      MI.getOpcode() == TargetOpcode::REG_SEQUENCE)
    return false;

  // Don't sink instructions that the target prefers not to sink.
  if (!TII->shouldSink(MI))
    return false;

  // Check if it's safe to move the instruction.
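  // Note: SawStore is seeded to true, so isSafeToMove() conservatively refuses
  // any instruction whose safety depends on the absence of intervening stores
  // (loads and stores were already rejected above).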
  bool SawStore = true;
  if (!MI.isSafeToMove(AA, SawStore))
    return false;

  // Convergent operations may not be made control-dependent on additional
  // values.
  if (MI.isConvergent())
    return false;

  // Don't sink defs/uses of hard registers or if the instruction defines more
  // than one register.
  // Don't sink more than two register uses - it'll cover most of the cases and
  // greatly simplifies the register pressure checks.
  Register DefReg;
  Register UsedRegA, UsedRegB;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isImm() || MO.isRegMask() || MO.isRegLiveOut() || MO.isMetadata() ||
        MO.isMCSymbol() || MO.isDbgInstrRef() || MO.isCFIIndex() ||
        MO.isIntrinsicID() || MO.isPredicate() || MO.isShuffleMask())
      continue;
    if (!MO.isReg())
      return false;

    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isVirtual()) {
      if (MO.isDef()) {
        if (DefReg)
          return false;
        DefReg = Reg;
        continue;
      }

      if (UsedRegA == 0)
        UsedRegA = Reg;
      else if (UsedRegB == 0)
        UsedRegB = Reg;
      else
        return false;
      continue;
    }

    if (Reg.isPhysical() &&
        (MRI->isConstantPhysReg(Reg) || TII->isIgnorableUse(MO)))
      continue;

    return false;
  }

  // Scan uses of the destination register. Every use, except the last, must be
  // a copy, with a chain of copies terminating with either a copy into a hard
  // register, or a load/store instruction where the use is part of the
  // address (*not* the stored value).
  using SinkInfo = std::pair<MachineInstr *, ExtAddrMode>;
  SmallVector<SinkInfo> SinkInto;
  SmallVector<Register> Worklist;

  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  const TargetRegisterClass *RCA =
      UsedRegA == 0 ? nullptr : MRI->getRegClass(UsedRegA);
  const TargetRegisterClass *RCB =
      UsedRegB == 0 ? nullptr : MRI->getRegClass(UsedRegB);

  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();

    for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
      ExtAddrMode MaybeAM;
      MachineInstr &UseInst = *MO.getParent();
      if (UseInst.isCopy()) {
        Register DstReg;
        if (const MachineOperand &O = UseInst.getOperand(0); O.isReg())
          DstReg = O.getReg();
        if (DstReg == 0)
          return false;
        if (DstReg.isVirtual()) {
          Worklist.push_back(DstReg);
          continue;
        }
        // If we are going to replace a copy, the original instruction must be
        // as cheap as a copy.
        if (!TII->isAsCheapAsAMove(MI))
          return false;
        // The hard register must be in the register class of the original
        // instruction's destination register.
        if (!RC->contains(DstReg))
          return false;
      } else if (UseInst.mayLoadOrStore()) {
        ExtAddrMode AM;
        if (!TII->canFoldIntoAddrMode(UseInst, Reg, MI, AM))
          return false;
        MaybeAM = AM;
      } else {
        return false;
      }

      if (UseInst.getParent() != MI.getParent()) {
        // If the register class of the register we are replacing is a
        // superset of any of the register classes of the operands of the
        // materialized instruction, don't consider that live range extended.
        const TargetRegisterClass *RCS = MRI->getRegClass(Reg);
        if (RCA && RCA->hasSuperClassEq(RCS))
          RCA = nullptr;
        else if (RCB && RCB->hasSuperClassEq(RCS))
          RCB = nullptr;
        if (RCA || RCB) {
          if (RCA == nullptr) {
            RCA = RCB;
            RCB = nullptr;
          }

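          // NRegs counts how many live ranges get extended into the
          // destination block. If both operands share the same register class,
          // RCB is dropped and both are charged against RCA's pressure sets.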
          unsigned NRegs = !!RCA + !!RCB;
          if (RCA == RCB)
            RCB = nullptr;

          // Check we don't exceed register pressure at the destination.
          const MachineBasicBlock &MBB = *UseInst.getParent();
          if (RCB == nullptr) {
            if (registerPressureSetExceedsLimit(NRegs, RCA, MBB))
              return false;
          } else if (registerPressureSetExceedsLimit(1, RCA, MBB) ||
                     registerPressureSetExceedsLimit(1, RCB, MBB)) {
            return false;
          }
        }
      }

      SinkInto.emplace_back(&UseInst, MaybeAM);
    }
  }

  if (SinkInto.empty())
    return false;

  // Now we know we can fold the instruction in all its users.
  for (auto &[SinkDst, MaybeAM] : SinkInto) {
    MachineInstr *New = nullptr;
    LLVM_DEBUG(dbgs() << "Sinking copy of"; MI.dump(); dbgs() << "into";
               SinkDst->dump());
    if (SinkDst->isCopy()) {
      // TODO: After performing the sink-and-fold, the original instruction is
      // deleted. Its value is still available (in a hard register), so if there
      // are debug instructions which refer to the (now deleted) virtual
      // register they could be updated to refer to the hard register, in
      // principle. However, it's not clear how to do that, moreover in some
      // cases the debug instructions may need to be replicated proportionally
      // to the number of the COPY instructions replaced and in some extreme
      // cases we can end up with quadratic increase in the number of debug
      // instructions.

      // Sink a copy of the instruction, replacing a COPY instruction.
      MachineBasicBlock::iterator InsertPt = SinkDst->getIterator();
      Register DstReg = SinkDst->getOperand(0).getReg();
      TII->reMaterialize(*SinkDst->getParent(), InsertPt, DstReg, 0, MI, *TRI);
      New = &*std::prev(InsertPt);
      if (!New->getDebugLoc())
        New->setDebugLoc(SinkDst->getDebugLoc());

      // The operand registers of the "sunk" instruction have their live range
      // extended and their kill flags may no longer be correct. Conservatively
      // clear the kill flags.
      if (UsedRegA)
        MRI->clearKillFlags(UsedRegA);
      if (UsedRegB)
        MRI->clearKillFlags(UsedRegB);
    } else {
      // Fold instruction into the addressing mode of a memory instruction.
      New = TII->emitLdStWithAddr(*SinkDst, MaybeAM);

      // The registers of the addressing mode may have their live range extended
      // and their kill flags may no longer be correct. Conservatively clear the
      // kill flags.
      if (Register R = MaybeAM.BaseReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
      if (Register R = MaybeAM.ScaledReg; R.isValid() && R.isVirtual())
        MRI->clearKillFlags(R);
    }
    LLVM_DEBUG(dbgs() << "yielding"; New->dump());
    // Clear the StoreInstrCache, since we may invalidate it by erasing.
    if (SinkDst->mayStore() && !SinkDst->hasOrderedMemoryRef())
      StoreInstrCache.clear();
    SinkDst->eraseFromParent();
  }

  // Collect operands that need to be cleaned up because the registers no longer
  // exist (in COPYs and debug instructions). We cannot delete instructions or
  // clear operands while traversing register uses.
  SmallVector<MachineOperand *> Cleanup;
  Worklist.push_back(DefReg);
  while (!Worklist.empty()) {
    Register Reg = Worklist.pop_back_val();
    for (MachineOperand &MO : MRI->use_operands(Reg)) {
      MachineInstr *U = MO.getParent();
      assert((U->isCopy() || U->isDebugInstr()) &&
             "Only debug uses and copies must remain");
      if (U->isCopy())
        Worklist.push_back(U->getOperand(0).getReg());
      Cleanup.push_back(&MO);
    }
  }

  // Delete the dead COPYs and clear operands in debug instructions
  for (MachineOperand *MO : Cleanup) {
    MachineInstr *I = MO->getParent();
    if (I->isCopy()) {
      I->eraseFromParent();
    } else {
      MO->setReg(0);
      MO->setSubReg(0);
    }
  }

  MI.eraseFromParent();
  return true;
}

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool MachineSinking::AllUsesDominatedByBlock(Register Reg,
                                             MachineBasicBlock *MBB,
                                             MachineBasicBlock *DefMBB,
                                             bool &BreakPHIEdge,
                                             bool &LocalUse) const {
  assert(Reg.isVirtual() && "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // %bb.1:
  //   Predecessors according to CFG: %bb.0
  //     ...
  //     %def = DEC64_32r %x, implicit-def dead %eflags
  //     ...
  //     JE_4 <%bb.37>, implicit %eflags
  //   Successors according to CFG: %bb.37 %bb.2
  //
  // %bb.2:
  //     %p = PHI %y, %bb.0, %def, %bb.1
  if (all_of(MRI->use_nodbg_operands(Reg), [&](MachineOperand &MO) {
        MachineInstr *UseInst = MO.getParent();
        unsigned OpNo = MO.getOperandNo();
        MachineBasicBlock *UseBlock = UseInst->getParent();
        return UseBlock == MBB && UseInst->isPHI() &&
               UseInst->getOperand(OpNo + 1).getMBB() == DefMBB;
      })) {
    BreakPHIEdge = true;
    return true;
  }

  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Determine the block of the use.
    MachineInstr *UseInst = MO.getParent();
    unsigned OpNo = &MO - &UseInst->getOperand(0);
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(OpNo+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}

/// Return true if this machine instruction loads from global offset table or
/// constant pool.
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
  assert(MI.mayLoad() && "Expected MI that loads!");

  // If we lost memory operands, conservatively assume that the instruction
  // reads from everything.
  if (MI.memoperands_empty())
    return true;

  for (MachineMemOperand *MemOp : MI.memoperands())
    if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
      if (PSV->isGOT() || PSV->isConstantPool())
        return true;

  return false;
}

void MachineSinking::FindCycleSinkCandidates(
    MachineCycle *Cycle, MachineBasicBlock *BB,
    SmallVectorImpl<MachineInstr *> &Candidates) {
  for (auto &MI : *BB) {
    LLVM_DEBUG(dbgs() << "CycleSink: Analysing candidate: " << MI);
    if (!TII->shouldSink(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not a candidate for this "
                           "target\n");
      continue;
    }
    if (!isCycleInvariant(Cycle, MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction is not cycle invariant\n");
      continue;
    }
    bool DontMoveAcrossStore = true;
    if (!MI.isSafeToMove(AA, DontMoveAcrossStore)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Instruction not safe to move.\n");
      continue;
    }
    if (MI.mayLoad() && !mayLoadFromGOTOrConstantPool(MI)) {
      LLVM_DEBUG(dbgs() << "CycleSink: Not sinking load that isn't a GOT or "
                           "constant pool load\n");
      continue;
    }
    if (MI.isConvergent())
      continue;

    const MachineOperand &MO = MI.getOperand(0);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    if (!MRI->hasOneDef(MO.getReg()))
      continue;

    LLVM_DEBUG(dbgs() << "CycleSink: Instruction added as candidate.\n");
    Candidates.push_back(&MI);
  }
}

bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  LLVM_DEBUG(dbgs() << "******** Machine Sinking ********\n");

  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();
  CI = &getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
  MBFI = UseBlockFreqInfo ? &getAnalysis<MachineBlockFrequencyInfo>() : nullptr;
  MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  RegClassInfo.runOnMachineFunction(MF);
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
  EnableSinkAndFold = PassConfig->getEnableSinkAndFold();

  bool EverMadeChange = false;

  while (true) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    ToSplit.clear();
    for (auto &MBB: MF)
      MadeChange |= ProcessBlock(MBB);

    // If we have anything we marked as toSplit, split it now.
    for (const auto &Pair : ToSplit) {
      auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
      if (NewSucc != nullptr) {
        LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
                          << printMBBReference(*Pair.first) << " -- "
                          << printMBBReference(*NewSucc) << " -- "
                          << printMBBReference(*Pair.second) << '\n');
        if (MBFI)
          MBFI->onEdgeSplit(*Pair.first, *NewSucc, *MBPI);

        MadeChange = true;
        ++NumSplit;
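        // Keep the cycle analysis consistent with the block just inserted on
        // this edge.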
745        CI->splitCriticalEdge(Pair.first, Pair.second, NewSucc);
746      } else
747        LLVM_DEBUG(dbgs() << " *** Not legal to break critical edge\n");
748    }
749    // If this iteration over the code changed anything, keep iterating.
750    if (!MadeChange) break;
751    EverMadeChange = true;
752  }
753
754  if (SinkInstsIntoCycle) {
755    SmallVector<MachineCycle *, 8> Cycles(CI->toplevel_begin(),
756                                          CI->toplevel_end());
757    for (auto *Cycle : Cycles) {
758      MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
759      if (!Preheader) {
760        LLVM_DEBUG(dbgs() << "CycleSink: Can't find preheader\n");
761        continue;
762      }
763      SmallVector<MachineInstr *, 8> Candidates;
764      FindCycleSinkCandidates(Cycle, Preheader, Candidates);
765
766      // Walk the candidates in reverse order so that we start with the use
767      // of a def-use chain, if there is any.
768      // TODO: Sort the candidates using a cost-model.
769      unsigned i = 0;
770      for (MachineInstr *I : llvm::reverse(Candidates)) {
771        if (i++ == SinkIntoCycleLimit) {
          LLVM_DEBUG(dbgs() << "CycleSink:   Reached the limit of instructions "
                               "to be analysed.");
          break;
        }

        if (!SinkIntoCycle(Cycle, *I))
          break;
        EverMadeChange = true;
        ++NumCycleSunk;
      }
    }
  }

  HasStoreCache.clear();
  StoreInstrCache.clear();

  // Now clear any kill flags for recorded registers.
  for (auto I : RegsToClearKillFlags)
    MRI->clearKillFlags(I);
  RegsToClearKillFlags.clear();

  return EverMadeChange;
}

bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  if ((!EnableSinkAndFold && MBB.succ_size() <= 1) || MBB.empty())
    return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable cycle there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Cache all successors, sorted by frequency info and cycle depth.
  AllSuccsCache AllSuccessors;

  // Walk the basic block bottom-up.  Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr &MI = *I;  // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI.isDebugOrPseudoInstr()) {
      if (MI.isDebugValue())
        ProcessDbgInst(MI);
      continue;
    }

    if (EnableSinkAndFold && PerformSinkAndFold(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    // Can't sink anything out of a block that has less than two successors.
    if (MBB.succ_size() <= 1)
      continue;

    if (PerformTrivialForwardCoalescing(MI, &MBB)) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore, AllSuccessors)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  SeenDbgUsers.clear();
  SeenDbgVars.clear();
  // Recalculate the cached register pressure after processing a block, since
  // sinking into a block changes its pressure.
  CachedRegisterPressure.clear();
  return MadeChange;
}

void MachineSinking::ProcessDbgInst(MachineInstr &MI) {
  // When we see DBG_VALUEs for registers, record any vreg it reads, so that
  // we know what to sink if the vreg def sinks.
  assert(MI.isDebugValue() && "Expected DBG_VALUE for processing");

  DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
                    MI.getDebugLoc()->getInlinedAt());
  bool SeenBefore = SeenDbgVars.contains(Var);

  for (MachineOperand &MO : MI.debug_operands()) {
    if (MO.isReg() && MO.getReg().isVirtual())
      SeenDbgUsers[MO.getReg()].push_back(SeenDbgUser(&MI, SeenBefore));
  }

  // Record the variable for any DBG_VALUE, to avoid re-ordering any of them.
  SeenDbgVars.insert(Var);
}

bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr &MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)).second)
    return true;

  if (!MI.isCopy() && !TII->isAsCheapAsAMove(MI))
    return true;

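  // An edge taken with probability at or below the threshold is cold enough
  // that splitting it is preferable to speculatively executing MI on the other
  // path.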
890  if (From->isSuccessor(To) && MBPI->getEdgeProbability(From, To) <=
891      BranchProbability(SplitEdgeProbabilityThreshold, 100))
892    return true;
893
894  // MI is cheap, we probably don't want to break the critical edge for it.
895  // However, if this would allow some definitions of its source operands
896  // to be sunk then it's probably worth it.
897  for (const MachineOperand &MO : MI.all_uses()) {
898    Register Reg = MO.getReg();
899    if (Reg == 0)
900      continue;
901
902    // We don't move live definitions of physical registers,
903    // so sinking their uses won't enable any opportunities.
904    if (Reg.isPhysical())
905      continue;
906
907    // If this instruction is the only user of a virtual register,
908    // check if breaking the edge will enable sinking
909    // both this instruction and the defining instruction.
910    if (MRI->hasOneNonDBGUse(Reg)) {
911      // If the definition resides in same MBB,
912      // claim it's likely we can sink these together.
913      // If definition resides elsewhere, we aren't
914      // blocking it from being sunk so don't break the edge.
915      MachineInstr *DefMI = MRI->getVRegDef(Reg);
916      if (DefMI->getParent() == MI.getParent())
917        return true;
918    }
919  }
920
921  return false;
922}
923
924bool MachineSinking::PostponeSplitCriticalEdge(MachineInstr &MI,
925                                               MachineBasicBlock *FromBB,
926                                               MachineBasicBlock *ToBB,
927                                               bool BreakPHIEdge) {
928  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
929    return false;
930
931  // Avoid breaking back edge. From == To means backedge for single BB cycle.
932  if (!SplitEdges || FromBB == ToBB)
933    return false;
934
935  MachineCycle *FromCycle = CI->getCycle(FromBB);
936  MachineCycle *ToCycle = CI->getCycle(ToBB);
937
938  // Check for backedges of more "complex" cycles.
939  if (FromCycle == ToCycle && FromCycle &&
940      (!FromCycle->isReducible() || FromCycle->getHeader() == ToBB))
941    return false;
942
943  // It's not always legal to break critical edges and sink the computation
944  // to the edge.
945  //
946  // %bb.1:
947  // v1024
948  // Beq %bb.3
949  // <fallthrough>
950  // %bb.2:
951  // ... no uses of v1024
952  // <fallthrough>
953  // %bb.3:
954  // ...
955  //       = v1024
956  //
957  // If %bb.1 -> %bb.3 edge is broken and computation of v1024 is inserted:
958  //
959  // %bb.1:
960  // ...
961  // Bne %bb.2
962  // %bb.4:
963  // v1024 =
964  // B %bb.3
965  // %bb.2:
966  // ... no uses of v1024
967  // <fallthrough>
968  // %bb.3:
969  // ...
970  //       = v1024
971  //
972  // This is incorrect since v1024 is not computed along the %bb.1->%bb.2->%bb.3
973  // flow. We need to ensure the new basic block where the computation is
974  // sunk to dominates all the uses.
975  // It's only legal to break critical edge and sink the computation to the
976  // new block if all the predecessors of "To", except for "From", are
977  // not dominated by "From". Given SSA property, this means these
978  // predecessors are dominated by "To".
979  //
980  // There is no need to do this check if all the uses are PHI nodes. PHI
981  // sources are only defined on the specific predecessor edges.
982  if (!BreakPHIEdge) {
983    for (MachineBasicBlock *Pred : ToBB->predecessors())
984      if (Pred != FromBB && !DT->dominates(ToBB, Pred))
985        return false;
986  }
987
988  ToSplit.insert(std::make_pair(FromBB, ToBB));
989
990  return true;
991}
992
993std::vector<unsigned> &
994MachineSinking::getBBRegisterPressure(const MachineBasicBlock &MBB) {
  // To save compile time, a block's register pressure is computed at most once
  // per ProcessBlock iteration (via CachedRegisterPressure), even though the
  // pressure changes after sinking any instruction into the block.
  // FIXME: need an accurate and cheap register pressure estimation model here.
  auto RP = CachedRegisterPressure.find(&MBB);
  if (RP != CachedRegisterPressure.end())
    return RP->second;

  RegionPressure Pressure;
  RegPressureTracker RPTracker(Pressure);

  // Initialize the register pressure tracker.
  RPTracker.init(MBB.getParent(), &RegClassInfo, nullptr, &MBB, MBB.end(),
                 /*TrackLaneMasks*/ false, /*TrackUntiedDefs=*/true);

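  // Walk the block bottom-up, feeding each instruction's register operands to
  // the tracker, so the recorded MaxSetPressure covers the whole block.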
  for (MachineBasicBlock::const_iterator MII = MBB.instr_end(),
                                         MIE = MBB.instr_begin();
       MII != MIE; --MII) {
    const MachineInstr &MI = *std::prev(MII);
    if (MI.isDebugInstr() || MI.isPseudoProbe())
      continue;
    RegisterOperands RegOpers;
    RegOpers.collect(MI, *TRI, *MRI, false, false);
    RPTracker.recedeSkipDebugValues();
    assert(&*RPTracker.getPos() == &MI && "RPTracker sync error!");
    RPTracker.recede(RegOpers);
  }

  RPTracker.closeRegion();
  auto It = CachedRegisterPressure.insert(
      std::make_pair(&MBB, RPTracker.getPressure().MaxSetPressure));
  return It.first->second;
}

bool MachineSinking::registerPressureSetExceedsLimit(
    unsigned NRegs, const TargetRegisterClass *RC,
    const MachineBasicBlock &MBB) {
  unsigned Weight = NRegs * TRI->getRegClassWeight(RC).RegWeight;
  const int *PS = TRI->getRegClassPressureSets(RC);
  std::vector<unsigned> BBRegisterPressure = getBBRegisterPressure(MBB);
  for (; *PS != -1; PS++)
    if (Weight + BBRegisterPressure[*PS] >=
        TRI->getRegPressureSetLimit(*MBB.getParent(), *PS))
      return true;
  return false;
}

/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(Register Reg, MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          MachineBasicBlock *SuccToSinkTo,
                                          AllSuccsCache &AllSuccessors) {
  assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");

  if (MBB == SuccToSinkTo)
    return false;

  // It is profitable if SuccToSinkTo does not post-dominate the current block.
  if (!PDT->dominates(SuccToSinkTo, MBB))
    return true;

  // It is profitable to sink an instruction from a deeper cycle to a shallower
  // cycle, even if the latter post-dominates the former (PR21115).
  if (CI->getCycleDepth(MBB) > CI->getCycleDepth(SuccToSinkTo))
    return true;

  // Check if the only use in the post-dominated block is a PHI instruction.
  bool NonPHIUse = false;
  for (MachineInstr &UseInst : MRI->use_nodbg_instructions(Reg)) {
    MachineBasicBlock *UseBlock = UseInst.getParent();
    if (UseBlock == SuccToSinkTo && !UseInst.isPHI())
      NonPHIUse = true;
  }
  if (!NonPHIUse)
    return true;

  // Even if SuccToSinkTo post-dominates MBB, sinking may still be profitable
  // if MI can be profitably sunk into another block in the next round.
  bool BreakPHIEdge = false;
  // FIXME - If finding successor is compile time expensive then cache results.
  if (MachineBasicBlock *MBB2 =
          FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
    return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);

  MachineCycle *MCycle = CI->getCycle(MBB);

  // If the instruction is not inside a cycle, it is not profitable to sink MI
  // to a post-dominating block SuccToSinkTo.
  if (!MCycle)
    return false;

  // If this instruction is inside a cycle and sinking it can shorten the live
  // ranges of more registers, it is still profitable.
  for (const MachineOperand &MO : MI.operands()) {
    // Ignore non-register operands.
    if (!MO.isReg())
      continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (Reg.isPhysical()) {
      // Don't handle non-constant and non-ignorable physical register uses.
      if (MO.isUse() && !MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
        return false;
      continue;
    }

    // Users for the defs are all dominated by SuccToSinkTo.
    if (MO.isDef()) {
      // This def register's live range is shortened after sinking.
      bool LocalUse = false;
      if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB, BreakPHIEdge,
                                   LocalUse))
        return false;
    } else {
      MachineInstr *DefMI = MRI->getVRegDef(Reg);
      if (!DefMI)
        continue;
      MachineCycle *Cycle = CI->getCycle(DefMI->getParent());
      // If DefMI is defined outside of the cycle, there should be no live
      // range impact for this operand. "Defined outside of the cycle" means
      // either:
      // 1: the definition is outside of the cycle, or
      // 2: the definition is in this cycle, but it is a PHI in the cycle
      //    header.
      if (Cycle != MCycle || (DefMI->isPHI() && Cycle && Cycle->isReducible() &&
                              Cycle->getHeader() == DefMI->getParent()))
        continue;
      // The DefMI is defined inside the cycle.
      // If sinking this operand makes some register pressure set exceed limit,
      // it is not profitable.
      if (registerPressureSetExceedsLimit(1, MRI->getRegClass(Reg),
                                          *SuccToSinkTo)) {
        LLVM_DEBUG(dbgs() << "register pressure set exceeds limit, not "
                             "profitable.");
        return false;
      }
    }
  }

  // If MI is in a cycle and all its operands are live across the whole cycle,
  // or if sinking does not make any register pressure set exceed its limit, it
  // is profitable to sink MI.
  return true;
}

/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
/// computing it if it was not already cached.
SmallVector<MachineBasicBlock *, 4> &
MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
                                       AllSuccsCache &AllSuccessors) const {
  // Do we have the sorted successors in cache?
  auto Succs = AllSuccessors.find(MBB);
  if (Succs != AllSuccessors.end())
    return Succs->second;

  SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->successors());

  // Handle cases where sinking can happen but where the sink point isn't a
  // successor. For example:
  //
  //   x = computation
  //   if () {} else {}
  //   use x
  //
  for (MachineDomTreeNode *DTChild : DT->getNode(MBB)->children()) {
    // DomTree children of MBB that have MBB as immediate dominator are added.
    if (DTChild->getIDom()->getBlock() == MI.getParent() &&
        // Skip MBBs already added to the AllSuccs vector above.
        !MBB->isSuccessor(DTChild->getBlock()))
      AllSuccs.push_back(DTChild->getBlock());
  }

  // Sort Successors according to their cycle depth or block frequency info.
  llvm::stable_sort(
      AllSuccs, [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
        uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
        uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
        bool HasBlockFreq = LHSFreq != 0 || RHSFreq != 0;
        return HasBlockFreq ? LHSFreq < RHSFreq
                            : CI->getCycleDepth(L) < CI->getCycleDepth(R);
      });

  auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));

  return it.first->second;
}

/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *
MachineSinking::FindSuccToSinkTo(MachineInstr &MI, MachineBasicBlock *MBB,
                                 bool &BreakPHIEdge,
                                 AllSuccsCache &AllSuccessors) {
  assert (MBB && "Invalid MachineBasicBlock!");

  // loop over all the operands of the specified instruction.  If there is
  // anything we can't handle, bail out.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = nullptr;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    Register Reg = MO.getReg();
    if (Reg == 0) continue;

    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->isConstantPhysReg(Reg) && !TII->isIgnorableUse(MO))
          return nullptr;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return nullptr;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return nullptr;

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, MBB,
                                     BreakPHIEdge, LocalUse))
          return nullptr;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to. If we have reliable block frequency information
      // (frequency != 0) available, give successors with smaller frequencies
      // higher priority, otherwise prioritize smaller cycle depths.
      for (MachineBasicBlock *SuccBlock :
           GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return nullptr;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (!SuccToSinkTo)
        return nullptr;
      if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
        return nullptr;
    }
  }

  // It is not possible to sink an instruction into its own block.  This can
  // happen with cycles.
  if (MBB == SuccToSinkTo)
    return nullptr;

  // It's not safe to sink instructions to an EH landing pad. Control flow into
  // a landing pad is implicitly defined.
1264  if (SuccToSinkTo && SuccToSinkTo->isEHPad())
1265    return nullptr;
1266
1267  // It ought to be okay to sink instructions into an INLINEASM_BR target, but
1268  // only if we make sure that MI occurs _before_ an INLINEASM_BR instruction in
1269  // the source block (which this code does not yet do). So for now, forbid
1270  // doing so.
1271  if (SuccToSinkTo && SuccToSinkTo->isInlineAsmBrIndirectTarget())
1272    return nullptr;
1273
1274  if (SuccToSinkTo && !TII->isSafeToSink(MI, SuccToSinkTo, CI))
1275    return nullptr;
1276
1277  return SuccToSinkTo;
1278}
1279
1280/// Return true if MI is likely to be usable as a memory operation by the
1281/// implicit null check optimization.
1282///
1283/// This is a "best effort" heuristic, and should not be relied upon for
1284/// correctness.  This returning true does not guarantee that the implicit null
1285/// check optimization is legal over MI, and this returning false does not
1286/// guarantee MI cannot possibly be used to do a null check.
1287static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
1288                                             const TargetInstrInfo *TII,
1289                                             const TargetRegisterInfo *TRI) {
1290  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;
1291
1292  auto *MBB = MI.getParent();
1293  if (MBB->pred_size() != 1)
1294    return false;
1295
1296  auto *PredMBB = *MBB->pred_begin();
1297  auto *PredBB = PredMBB->getBasicBlock();
1298
1299  // Frontends that don't use implicit null checks have no reason to emit
1300  // branches with make.implicit metadata, and this function should always
1301  // return false for them.
1302  if (!PredBB ||
1303      !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
1304    return false;
1305
1306  const MachineOperand *BaseOp;
1307  int64_t Offset;
1308  bool OffsetIsScalable;
1309  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
1310    return false;
1311
1312  if (!BaseOp->isReg())
1313    return false;
1314
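  // Only an unpredicated load can be turned into an implicit null check.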
  if (!(MI.mayLoad() && !MI.isPredicable()))
    return false;

  MachineBranchPredicate MBP;
  if (TII->analyzeBranchPredicate(*PredMBB, MBP, false))
    return false;

  return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
          MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
         MBP.LHS.getReg() == BaseOp->getReg();
}

/// If the sunk instruction is a copy, try to forward the copy instead of
/// leaving an 'undef' DBG_VALUE in the original location. Don't do this if
/// there's any subregister weirdness involved. Returns true if copy
/// propagation occurred.
static bool attemptDebugCopyProp(MachineInstr &SinkInst, MachineInstr &DbgMI,
                                 Register Reg) {
  const MachineRegisterInfo &MRI = SinkInst.getMF()->getRegInfo();
  const TargetInstrInfo &TII = *SinkInst.getMF()->getSubtarget().getInstrInfo();

  // Copy DBG_VALUE operand and set the original to undef. We then check to
  // see whether this is something that can be copy-forwarded. If it isn't,
  // continue around the loop.

  const MachineOperand *SrcMO = nullptr, *DstMO = nullptr;
  auto CopyOperands = TII.isCopyInstr(SinkInst);
  if (!CopyOperands)
    return false;
  SrcMO = CopyOperands->Source;
  DstMO = CopyOperands->Destination;

  // Check validity of forwarding this copy.
  bool PostRA = MRI.getNumVirtRegs() == 0;
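  // With no virtual registers left, register allocation has already run.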

  // Trying to forward between physical and virtual registers is too hard.
  if (Reg.isVirtual() != SrcMO->getReg().isVirtual())
    return false;

  // Only try virtual register copy-forwarding before regalloc, and physical
  // register copy-forwarding after regalloc.
  bool arePhysRegs = !Reg.isVirtual();
  if (arePhysRegs != PostRA)
    return false;

  // Pre-regalloc, only forward if all subregisters agree (or there are no
  // subregs at all). More analysis might recover some forwardable copies.
  if (!PostRA)
    for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg))
      if (DbgMO.getSubReg() != SrcMO->getSubReg() ||
          DbgMO.getSubReg() != DstMO->getSubReg())
        return false;

  // Post-regalloc, we may be sinking a DBG_VALUE of a sub or super-register
  // of this copy. Only forward the copy if the DBG_VALUE operand exactly
  // matches the copy destination.
  if (PostRA && Reg != DstMO->getReg())
    return false;

  for (auto &DbgMO : DbgMI.getDebugOperandsForReg(Reg)) {
    DbgMO.setReg(SrcMO->getReg());
    DbgMO.setSubReg(SrcMO->getSubReg());
  }
  return true;
}

using MIRegs = std::pair<MachineInstr *, SmallVector<unsigned, 2>>;
/// Sink an instruction and its associated debug instructions.
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
                        MachineBasicBlock::iterator InsertPos,
                        ArrayRef<MIRegs> DbgValuesToSink) {
  // If we cannot find a location to use (merge with), then we erase the debug
  // location to prevent debug-info driven tools from potentially reporting
  // wrong location information.
  if (!SuccToSinkTo.empty() && InsertPos != SuccToSinkTo.end())
    MI.setDebugLoc(DILocation::getMergedLocation(MI.getDebugLoc(),
                                                 InsertPos->getDebugLoc()));
  else
    MI.setDebugLoc(DebugLoc());

  // Move the instruction.
  MachineBasicBlock *ParentBlock = MI.getParent();
  SuccToSinkTo.splice(InsertPos, ParentBlock, MI,
                      ++MachineBasicBlock::iterator(MI));

  // Sink a copy of debug users to the insert position. Mark the original
  // DBG_VALUE location as 'undef', indicating that any earlier variable
  // location should be terminated as we've optimised away the value at this
  // point.
  for (const auto &DbgValueToSink : DbgValuesToSink) {
    MachineInstr *DbgMI = DbgValueToSink.first;
    MachineInstr *NewDbgMI = DbgMI->getMF()->CloneMachineInstr(DbgMI);
    SuccToSinkTo.insert(InsertPos, NewDbgMI);

    bool PropagatedAllSunkOps = true;
    for (unsigned Reg : DbgValueToSink.second) {
      if (DbgMI->hasDebugOperandForReg(Reg)) {
        if (!attemptDebugCopyProp(MI, *DbgMI, Reg)) {
          PropagatedAllSunkOps = false;
          break;
        }
      }
    }
    if (!PropagatedAllSunkOps)
      DbgMI->setDebugValueUndef();
  }
}

/// hasStoreBetween - Check if there is a store between the straight-line
/// blocks From and To.
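///
/// For illustration, assuming a diamond-shaped CFG
///   From -> {Left, Right} -> To
/// where From dominates To and To post-dominates From: a load in From may only
/// be sunk into To if neither Left nor Right contains a call, an ordered
/// memory operation, or a store that may alias the load.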
1426bool MachineSinking::hasStoreBetween(MachineBasicBlock *From,
1427                                     MachineBasicBlock *To, MachineInstr &MI) {
  // Make sure From and To are on a straight-line path, which means From
  // dominates To and To post-dominates From.
1430  if (!DT->dominates(From, To) || !PDT->dominates(To, From))
1431    return true;
1432
1433  auto BlockPair = std::make_pair(From, To);
1434
  // Has this block pair been queried before, with a definite cached result?
1437  if (auto It = HasStoreCache.find(BlockPair); It != HasStoreCache.end())
1438    return It->second;
1439
1440  if (auto It = StoreInstrCache.find(BlockPair); It != StoreInstrCache.end())
1441    return llvm::any_of(It->second, [&](MachineInstr *I) {
1442      return I->mayAlias(AA, MI, false);
1443    });
1444
1445  bool SawStore = false;
1446  bool HasAliasedStore = false;
1447  DenseSet<MachineBasicBlock *> HandledBlocks;
1448  DenseSet<MachineBasicBlock *> HandledDomBlocks;
1449  // Go through all reachable blocks from From.
1450  for (MachineBasicBlock *BB : depth_first(From)) {
1451    // We insert the instruction at the start of block To, so no need to worry
1452    // about stores inside To.
    // Stores in block From have already been considered on entry to
    // SinkInstruction.
1455    if (BB == To || BB == From)
1456      continue;
1457
    // We already handled this BB in a previous iteration.
1459    if (HandledBlocks.count(BB))
1460      continue;
1461
1462    HandledBlocks.insert(BB);
    // If To post-dominates BB, then BB lies on a path from From to To.
1464    if (PDT->dominates(To, BB)) {
1465      if (!HandledDomBlocks.count(BB))
1466        HandledDomBlocks.insert(BB);
1467
      // If this BB is too big, or the number of blocks on the straight-line
      // path between From and To is too big, stop searching to save compile
      // time.
1470      if (BB->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold) ||
1471          HandledDomBlocks.size() > SinkLoadBlocksThreshold) {
1472        for (auto *DomBB : HandledDomBlocks) {
1473          if (DomBB != BB && DT->dominates(DomBB, BB))
1474            HasStoreCache[std::make_pair(DomBB, To)] = true;
          else if (DomBB != BB && DT->dominates(BB, DomBB))
1476            HasStoreCache[std::make_pair(From, DomBB)] = true;
1477        }
1478        HasStoreCache[BlockPair] = true;
1479        return true;
1480      }
1481
1482      for (MachineInstr &I : *BB) {
1483        // Treat as alias conservatively for a call or an ordered memory
1484        // operation.
1485        if (I.isCall() || I.hasOrderedMemoryRef()) {
1486          for (auto *DomBB : HandledDomBlocks) {
1487            if (DomBB != BB && DT->dominates(DomBB, BB))
1488              HasStoreCache[std::make_pair(DomBB, To)] = true;
            else if (DomBB != BB && DT->dominates(BB, DomBB))
1490              HasStoreCache[std::make_pair(From, DomBB)] = true;
1491          }
1492          HasStoreCache[BlockPair] = true;
1493          return true;
1494        }
1495
1496        if (I.mayStore()) {
1497          SawStore = true;
          // We may still be able to sink MI if none of the stores in between
          // alias MI.
          // Cache all store instructions, so that we don't need to walk all
          // blocks reachable from From again for the next load instruction.
1502          if (I.mayAlias(AA, MI, false))
1503            HasAliasedStore = true;
1504          StoreInstrCache[BlockPair].push_back(&I);
1505        }
1506      }
1507    }
1508  }
1509  // If there is no store at all, cache the result.
1510  if (!SawStore)
1511    HasStoreCache[BlockPair] = false;
1512  return HasAliasedStore;
1513}
1514
1515/// Sink instructions into cycles if profitable. This especially tries to
1516/// prevent register spills caused by register pressure if there is little to no
1517/// overhead moving instructions into cycles.
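///
/// For illustration (hypothetical MIR): a copy placed in the cycle preheader,
/// such as
///   %1:gpr64 = COPY $x0
/// whose only uses are inside the cycle can be moved to the nearest common
/// dominator of its in-cycle uses, trading a cheap re-executed copy per
/// iteration for lower register pressure across the cycle.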
1518bool MachineSinking::SinkIntoCycle(MachineCycle *Cycle, MachineInstr &I) {
1519  LLVM_DEBUG(dbgs() << "CycleSink: Finding sink block for: " << I);
1520  MachineBasicBlock *Preheader = Cycle->getCyclePreheader();
1521  assert(Preheader && "Cycle sink needs a preheader block");
1522  MachineBasicBlock *SinkBlock = nullptr;
1523  bool CanSink = true;
1524  const MachineOperand &MO = I.getOperand(0);
1525
1526  for (MachineInstr &MI : MRI->use_instructions(MO.getReg())) {
1527    LLVM_DEBUG(dbgs() << "CycleSink:   Analysing use: " << MI);
1528    if (!Cycle->contains(MI.getParent())) {
1529      LLVM_DEBUG(dbgs() << "CycleSink:   Use not in cycle, can't sink.\n");
1530      CanSink = false;
1531      break;
1532    }
1533
1534    // FIXME: Come up with a proper cost model that estimates whether sinking
1535    // the instruction (and thus possibly executing it on every cycle
1536    // iteration) is more expensive than a register.
    // For now, assume that copies are cheap and thus almost always worth it.
1538    if (!MI.isCopy()) {
1539      LLVM_DEBUG(dbgs() << "CycleSink:   Use is not a copy\n");
1540      CanSink = false;
1541      break;
1542    }
1543    if (!SinkBlock) {
1544      SinkBlock = MI.getParent();
1545      LLVM_DEBUG(dbgs() << "CycleSink:   Setting sink block to: "
1546                        << printMBBReference(*SinkBlock) << "\n");
1547      continue;
1548    }
1549    SinkBlock = DT->findNearestCommonDominator(SinkBlock, MI.getParent());
1550    if (!SinkBlock) {
1551      LLVM_DEBUG(dbgs() << "CycleSink:   Can't find nearest dominator\n");
1552      CanSink = false;
1553      break;
1554    }
1555    LLVM_DEBUG(dbgs() << "CycleSink:   Setting nearest common dom block: " <<
1556               printMBBReference(*SinkBlock) << "\n");
1557  }
1558
1559  if (!CanSink) {
1560    LLVM_DEBUG(dbgs() << "CycleSink: Can't sink instruction.\n");
1561    return false;
1562  }
1563  if (!SinkBlock) {
1564    LLVM_DEBUG(dbgs() << "CycleSink: Not sinking, can't find sink block.\n");
1565    return false;
1566  }
1567  if (SinkBlock == Preheader) {
1568    LLVM_DEBUG(
1569        dbgs() << "CycleSink: Not sinking, sink block is the preheader\n");
1570    return false;
1571  }
1572  if (SinkBlock->sizeWithoutDebugLargerThan(SinkLoadInstsPerBlockThreshold)) {
1573    LLVM_DEBUG(
        dbgs() << "CycleSink: Not sinking, block too large to analyse.\n");
1575    return false;
1576  }
1577
1578  LLVM_DEBUG(dbgs() << "CycleSink: Sinking instruction!\n");
1579  SinkBlock->splice(SinkBlock->SkipPHIsAndLabels(SinkBlock->begin()), Preheader,
1580                    I);
1581
  // Conservatively clear any kill flags on uses of the sunk instruction.
1583  for (MachineOperand &MO : I.operands()) {
1584    if (MO.isReg() && MO.readsReg())
1585      RegsToClearKillFlags.insert(MO.getReg());
1586  }
1587
1588  // The instruction is moved from its basic block, so do not retain the
1589  // debug information.
1590  assert(!I.isDebugInstr() && "Should not sink debug inst");
1591  I.setDebugLoc(DebugLoc());
1592  return true;
1593}
1594
1595/// SinkInstruction - Determine whether it is safe to sink the specified machine
1596/// instruction out of its current block into a successor.
1597bool MachineSinking::SinkInstruction(MachineInstr &MI, bool &SawStore,
1598                                     AllSuccsCache &AllSuccessors) {
1599  // Don't sink instructions that the target prefers not to sink.
1600  if (!TII->shouldSink(MI))
1601    return false;
1602
1603  // Check if it's safe to move the instruction.
1604  if (!MI.isSafeToMove(AA, SawStore))
1605    return false;
1606
1607  // Convergent operations may not be made control-dependent on additional
1608  // values.
1609  if (MI.isConvergent())
1610    return false;
1611
1612  // Don't break implicit null checks.  This is a performance heuristic, and not
1613  // required for correctness.
1614  if (SinkingPreventsImplicitNullCheck(MI, TII, TRI))
1615    return false;
1616
1617  // FIXME: This should include support for sinking instructions within the
1618  // block they are currently in to shorten the live ranges.  We often get
1619  // instructions sunk into the top of a large block, but it would be better to
1620  // also sink them down before their first use in the block.  This xform has to
1621  // be careful not to *increase* register pressure though, e.g. sinking
1622  // "x = y + z" down if it kills y and z would increase the live ranges of y
1623  // and z and only shrink the live range of x.
1624
1625  bool BreakPHIEdge = false;
1626  MachineBasicBlock *ParentBlock = MI.getParent();
1627  MachineBasicBlock *SuccToSinkTo =
1628      FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);
1629
  // If no suitable successor was found (e.g. because the instruction has no
  // outputs and exists only for its side-effects), don't sink.
1631  if (!SuccToSinkTo)
1632    return false;
1633
1634  // If the instruction to move defines a dead physical register which is live
1635  // when leaving the basic block, don't move it because it could turn into a
1636  // "zombie" define of that preg. E.g., EFLAGS.
1637  for (const MachineOperand &MO : MI.all_defs()) {
1638    Register Reg = MO.getReg();
1639    if (Reg == 0 || !Reg.isPhysical())
1640      continue;
1641    if (SuccToSinkTo->isLiveIn(Reg))
1642      return false;
1643  }
1644
1645  LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccToSinkTo);
1646
1647  // If the block has multiple predecessors, this is a critical edge.
1648  // Decide if we can sink along it or need to break the edge.
1649  if (SuccToSinkTo->pred_size() > 1) {
1650    // We cannot sink a load across a critical edge - there may be stores in
1651    // other code paths.
1652    bool TryBreak = false;
1653    bool Store =
1654        MI.mayLoad() ? hasStoreBetween(ParentBlock, SuccToSinkTo, MI) : true;
1655    if (!MI.isSafeToMove(AA, Store)) {
1656      LLVM_DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
1657      TryBreak = true;
1658    }
1659
1660    // We don't want to sink across a critical edge if we don't dominate the
1661    // successor. We could be introducing calculations to new code paths.
1662    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
1663      LLVM_DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
1664      TryBreak = true;
1665    }
1666
1667    // Don't sink instructions into a cycle.
1668    if (!TryBreak && CI->getCycle(SuccToSinkTo) &&
1669        (!CI->getCycle(SuccToSinkTo)->isReducible() ||
1670         CI->getCycle(SuccToSinkTo)->getHeader() == SuccToSinkTo)) {
1671      LLVM_DEBUG(dbgs() << " *** NOTE: cycle header found\n");
1672      TryBreak = true;
1673    }
1674
1675    // Otherwise we are OK with sinking along a critical edge.
1676    if (!TryBreak)
1677      LLVM_DEBUG(dbgs() << "Sinking along critical edge.\n");
1678    else {
1679      // Mark this edge as to be split.
1680      // If the edge can actually be split, the next iteration of the main loop
1681      // will sink MI in the newly created block.
1682      bool Status =
1683        PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
1684      if (!Status)
1685        LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
1686                             "break critical edge\n");
1687      // The instruction will not be sunk this time.
1688      return false;
1689    }
1690  }
1691
1692  if (BreakPHIEdge) {
1693    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
1695    // break the critical edge first.
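    //
    // For illustration (hypothetical MIR): if the only use of the sunk value
    // is "%2 = PHI %0, %bb.0, %1, %bb.1" in SuccToSinkTo, the definition of
    // %0 cannot simply be placed ahead of the PHI; the edge from this block
    // has to be split and the instruction sunk into the new block on a later
    // iteration of the main loop.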
1696    bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
1697                                            SuccToSinkTo, BreakPHIEdge);
1698    if (!Status)
1699      LLVM_DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
1700                           "break critical edge\n");
1701    // The instruction will not be sunk this time.
1702    return false;
1703  }
1704
  // Determine where to insert. Skip PHI nodes and labels.
1706  MachineBasicBlock::iterator InsertPos =
1707      SuccToSinkTo->SkipPHIsAndLabels(SuccToSinkTo->begin());
1708  if (blockPrologueInterferes(SuccToSinkTo, InsertPos, MI, TRI, TII, MRI)) {
1709    LLVM_DEBUG(dbgs() << " *** Not sinking: prologue interference\n");
1710    return false;
1711  }
1712
1713  // Collect debug users of any vreg that this inst defines.
1714  SmallVector<MIRegs, 4> DbgUsersToSink;
1715  for (auto &MO : MI.all_defs()) {
1716    if (!MO.getReg().isVirtual())
1717      continue;
1718    if (!SeenDbgUsers.count(MO.getReg()))
1719      continue;
1720
1721    // Sink any users that don't pass any other DBG_VALUEs for this variable.
1722    auto &Users = SeenDbgUsers[MO.getReg()];
1723    for (auto &User : Users) {
1724      MachineInstr *DbgMI = User.getPointer();
1725      if (User.getInt()) {
1726        // This DBG_VALUE would re-order assignments. If we can't copy-propagate
1727        // it, it can't be recovered. Set it undef.
1728        if (!attemptDebugCopyProp(MI, *DbgMI, MO.getReg()))
1729          DbgMI->setDebugValueUndef();
1730      } else {
1731        DbgUsersToSink.push_back(
1732            {DbgMI, SmallVector<unsigned, 2>(1, MO.getReg())});
1733      }
1734    }
1735  }
1736
1737  // After sinking, some debug users may not be dominated any more. If possible,
1738  // copy-propagate their operands. As it's expensive, don't do this if there's
1739  // no debuginfo in the program.
1740  if (MI.getMF()->getFunction().getSubprogram() && MI.isCopy())
1741    SalvageUnsunkDebugUsersOfCopy(MI, SuccToSinkTo);
1742
1743  performSink(MI, *SuccToSinkTo, InsertPos, DbgUsersToSink);
1744
1745  // Conservatively, clear any kill flags, since it's possible that they are no
1746  // longer correct.
1747  // Note that we have to clear the kill flags for any register this instruction
1748  // uses as we may sink over another instruction which currently kills the
1749  // used registers.
1750  for (MachineOperand &MO : MI.all_uses())
1751    RegsToClearKillFlags.insert(MO.getReg()); // Remember to clear kill flags.
1752
1753  return true;
1754}
1755
1756void MachineSinking::SalvageUnsunkDebugUsersOfCopy(
1757    MachineInstr &MI, MachineBasicBlock *TargetBlock) {
1758  assert(MI.isCopy());
1759  assert(MI.getOperand(1).isReg());
1760
1761  // Enumerate all users of vreg operands that are def'd. Skip those that will
1762  // be sunk. For the rest, if they are not dominated by the block we will sink
1763  // MI into, propagate the copy source to them.
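  //
  // For illustration (hypothetical MIR): if "%1 = COPY %0" is sunk out of this
  // block and a "DBG_VALUE %1, ..." remains in a block that the sink
  // destination does not dominate, that DBG_VALUE is rewritten to use %0 so it
  // keeps describing a value that is still available there.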
1764  SmallVector<MachineInstr *, 4> DbgDefUsers;
1765  SmallVector<Register, 4> DbgUseRegs;
1766  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1767  for (auto &MO : MI.all_defs()) {
1768    if (!MO.getReg().isVirtual())
1769      continue;
1770    DbgUseRegs.push_back(MO.getReg());
1771    for (auto &User : MRI.use_instructions(MO.getReg())) {
1772      if (!User.isDebugValue() || DT->dominates(TargetBlock, User.getParent()))
1773        continue;
1774
      // If the user is in the same block, it will either be sunk as well or
      // become use-before-def.
1776      if (User.getParent() == MI.getParent())
1777        continue;
1778
1779      assert(User.hasDebugOperandForReg(MO.getReg()) &&
1780             "DBG_VALUE user of vreg, but has no operand for it?");
1781      DbgDefUsers.push_back(&User);
1782    }
1783  }
1784
1785  // Point the users of this copy that are no longer dominated, at the source
1786  // of the copy.
1787  for (auto *User : DbgDefUsers) {
1788    for (auto &Reg : DbgUseRegs) {
1789      for (auto &DbgOp : User->getDebugOperandsForReg(Reg)) {
1790        DbgOp.setReg(MI.getOperand(1).getReg());
1791        DbgOp.setSubReg(MI.getOperand(1).getSubReg());
1792      }
1793    }
1794  }
1795}
1796
1797//===----------------------------------------------------------------------===//
1798// This pass is not intended to be a replacement or a complete alternative
1799// for the pre-ra machine sink pass. It is only designed to sink COPY
1800// instructions which should be handled after RA.
1801//
1802// This pass sinks COPY instructions into a successor block, if the COPY is not
1803// used in the current block and the COPY is live-in to a single successor
1804// (i.e., doesn't require the COPY to be duplicated).  This avoids executing the
1805// copy on paths where their results aren't needed.  This also exposes
// additional opportunities for dead copy elimination and shrink wrapping.
1807//
1808// These copies were either not handled by or are inserted after the MachineSink
1809// pass. As an example of the former case, the MachineSink pass cannot sink
// COPY instructions with allocatable source registers; for AArch64, this type
// of copy instruction is frequently used to move function parameters (physical
// registers) into virtual registers in the entry block.
1813//
// For the machine IR below, this pass will sink the copy to %w19 in the entry
// block into its successor (%bb.1), because %w19 is live-in only in %bb.1.
1816// %bb.0:
1817//   %wzr = SUBSWri %w1, 1
1818//   %w19 = COPY %w0
1819//   Bcc 11, %bb.2
1820// %bb.1:
1821//   Live Ins: %w19
1822//   BL @fun
1823//   %w0 = ADDWrr %w0, %w19
1824//   RET %w0
1825// %bb.2:
1826//   %w0 = COPY %wzr
1827//   RET %w0
1828// As we sink %w19 (CSR in AArch64) into %bb.1, the shrink-wrapping pass will be
1829// able to see %bb.0 as a candidate.
1830//===----------------------------------------------------------------------===//
1831namespace {
1832
1833class PostRAMachineSinking : public MachineFunctionPass {
1834public:
1835  bool runOnMachineFunction(MachineFunction &MF) override;
1836
1837  static char ID;
1838  PostRAMachineSinking() : MachineFunctionPass(ID) {}
1839  StringRef getPassName() const override { return "PostRA Machine Sink"; }
1840
1841  void getAnalysisUsage(AnalysisUsage &AU) const override {
1842    AU.setPreservesCFG();
1843    MachineFunctionPass::getAnalysisUsage(AU);
1844  }
1845
1846  MachineFunctionProperties getRequiredProperties() const override {
1847    return MachineFunctionProperties().set(
1848        MachineFunctionProperties::Property::NoVRegs);
1849  }
1850
1851private:
1852  /// Track which register units have been modified and used.
1853  LiveRegUnits ModifiedRegUnits, UsedRegUnits;
1854
1855  /// Track DBG_VALUEs of (unmodified) register units. Each DBG_VALUE has an
1856  /// entry in this map for each unit it touches. The DBG_VALUE's entry
1857  /// consists of a pointer to the instruction itself, and a vector of registers
1858  /// referred to by the instruction that overlap the key register unit.
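  ///
  /// For illustration, assuming a DBG_VALUE that reads $w1 (which overlaps
  /// some register unit U): SeenDbgInstrs[U] would hold an entry of the form
  /// {&DbgMI, {$w1}}, so that when a COPY writing a register that shares unit
  /// U is considered for sinking, this DBG_VALUE is sunk along with it.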
1859  DenseMap<unsigned, SmallVector<MIRegs, 2>> SeenDbgInstrs;
1860
1861  /// Sink Copy instructions unused in the same block close to their uses in
1862  /// successors.
1863  bool tryToSinkCopy(MachineBasicBlock &BB, MachineFunction &MF,
1864                     const TargetRegisterInfo *TRI, const TargetInstrInfo *TII);
1865};
1866} // namespace
1867
1868char PostRAMachineSinking::ID = 0;
1869char &llvm::PostRAMachineSinkingID = PostRAMachineSinking::ID;
1870
1871INITIALIZE_PASS(PostRAMachineSinking, "postra-machine-sink",
1872                "PostRA Machine Sink", false, false)
1873
1874static bool aliasWithRegsInLiveIn(MachineBasicBlock &MBB, unsigned Reg,
1875                                  const TargetRegisterInfo *TRI) {
1876  LiveRegUnits LiveInRegUnits(*TRI);
1877  LiveInRegUnits.addLiveIns(MBB);
1878  return !LiveInRegUnits.available(Reg);
1879}
1880
1881static MachineBasicBlock *
1882getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
1883                      const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
1884                      unsigned Reg, const TargetRegisterInfo *TRI) {
1885  // Try to find a single sinkable successor in which Reg is live-in.
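  //
  // For illustration: if CurBB has sinkable successors S1 and S2 and Reg is
  // live-in only to S1, S1 is returned; if Reg (or an aliasing register) is
  // also live-in to S2, or to a successor that is not sinkable, nullptr is
  // returned instead.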
1886  MachineBasicBlock *BB = nullptr;
1887  for (auto *SI : SinkableBBs) {
1888    if (aliasWithRegsInLiveIn(*SI, Reg, TRI)) {
1889      // If BB is set here, Reg is live-in to at least two sinkable successors,
1890      // so quit.
1891      if (BB)
1892        return nullptr;
1893      BB = SI;
1894    }
1895  }
1896  // Reg is not live-in to any sinkable successors.
1897  if (!BB)
1898    return nullptr;
1899
  // Check if any register aliased with Reg is live-in to a non-sinkable
  // successor.
1901  for (auto *SI : CurBB.successors()) {
1902    if (!SinkableBBs.count(SI) && aliasWithRegsInLiveIn(*SI, Reg, TRI))
1903      return nullptr;
1904  }
1905  return BB;
1906}
1907
1908static MachineBasicBlock *
1909getSingleLiveInSuccBB(MachineBasicBlock &CurBB,
1910                      const SmallPtrSetImpl<MachineBasicBlock *> &SinkableBBs,
1911                      ArrayRef<unsigned> DefedRegsInCopy,
1912                      const TargetRegisterInfo *TRI) {
1913  MachineBasicBlock *SingleBB = nullptr;
1914  for (auto DefReg : DefedRegsInCopy) {
1915    MachineBasicBlock *BB =
1916        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefReg, TRI);
1917    if (!BB || (SingleBB && SingleBB != BB))
1918      return nullptr;
1919    SingleBB = BB;
1920  }
1921  return SingleBB;
1922}
1923
1924static void clearKillFlags(MachineInstr *MI, MachineBasicBlock &CurBB,
1925                           SmallVectorImpl<unsigned> &UsedOpsInCopy,
1926                           LiveRegUnits &UsedRegUnits,
1927                           const TargetRegisterInfo *TRI) {
1928  for (auto U : UsedOpsInCopy) {
1929    MachineOperand &MO = MI->getOperand(U);
1930    Register SrcReg = MO.getReg();
1931    if (!UsedRegUnits.available(SrcReg)) {
1932      MachineBasicBlock::iterator NI = std::next(MI->getIterator());
1933      for (MachineInstr &UI : make_range(NI, CurBB.end())) {
1934        if (UI.killsRegister(SrcReg, TRI)) {
1935          UI.clearRegisterKills(SrcReg, TRI);
1936          MO.setIsKill(true);
1937          break;
1938        }
1939      }
1940    }
1941  }
1942}
1943
1944static void updateLiveIn(MachineInstr *MI, MachineBasicBlock *SuccBB,
1945                         SmallVectorImpl<unsigned> &UsedOpsInCopy,
1946                         SmallVectorImpl<unsigned> &DefedRegsInCopy) {
1947  MachineFunction &MF = *SuccBB->getParent();
1948  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1949  for (unsigned DefReg : DefedRegsInCopy)
1950    for (MCPhysReg S : TRI->subregs_inclusive(DefReg))
1951      SuccBB->removeLiveIn(S);
1952  for (auto U : UsedOpsInCopy) {
1953    Register SrcReg = MI->getOperand(U).getReg();
1954    LaneBitmask Mask;
1955    for (MCRegUnitMaskIterator S(SrcReg, TRI); S.isValid(); ++S)
1956      Mask |= (*S).second;
1957    SuccBB->addLiveIn(SrcReg, Mask);
1958  }
1959  SuccBB->sortUniqueLiveIns();
1960}
1961
1962static bool hasRegisterDependency(MachineInstr *MI,
1963                                  SmallVectorImpl<unsigned> &UsedOpsInCopy,
1964                                  SmallVectorImpl<unsigned> &DefedRegsInCopy,
1965                                  LiveRegUnits &ModifiedRegUnits,
1966                                  LiveRegUnits &UsedRegUnits) {
1967  bool HasRegDependency = false;
1968  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1969    MachineOperand &MO = MI->getOperand(i);
1970    if (!MO.isReg())
1971      continue;
1972    Register Reg = MO.getReg();
1973    if (!Reg)
1974      continue;
1975    if (MO.isDef()) {
1976      if (!ModifiedRegUnits.available(Reg) || !UsedRegUnits.available(Reg)) {
1977        HasRegDependency = true;
1978        break;
1979      }
1980      DefedRegsInCopy.push_back(Reg);
1981
      // FIXME: Instead of isUse(), readsReg() would be a better fix here.
      // For example, we could then ignore modifications of a register with an
      // undef use. However, it's not perfectly clear whether skipping the
      // internal read is safe on all targets.
1986    } else if (MO.isUse()) {
1987      if (!ModifiedRegUnits.available(Reg)) {
1988        HasRegDependency = true;
1989        break;
1990      }
1991      UsedOpsInCopy.push_back(i);
1992    }
1993  }
1994  return HasRegDependency;
1995}
1996
1997bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
1998                                         MachineFunction &MF,
1999                                         const TargetRegisterInfo *TRI,
2000                                         const TargetInstrInfo *TII) {
2001  SmallPtrSet<MachineBasicBlock *, 2> SinkableBBs;
2002  // FIXME: For now, we sink only to a successor which has a single predecessor
2003  // so that we can directly sink COPY instructions to the successor without
2004  // adding any new block or branch instruction.
2005  for (MachineBasicBlock *SI : CurBB.successors())
2006    if (!SI->livein_empty() && SI->pred_size() == 1)
2007      SinkableBBs.insert(SI);
2008
2009  if (SinkableBBs.empty())
2010    return false;
2011
2012  bool Changed = false;
2013
2014  // Track which registers have been modified and used between the end of the
2015  // block and the current instruction.
2016  ModifiedRegUnits.clear();
2017  UsedRegUnits.clear();
2018  SeenDbgInstrs.clear();
2019
2020  for (MachineInstr &MI : llvm::make_early_inc_range(llvm::reverse(CurBB))) {
    // Track the operand indices of the registers used by the COPY.
2022    SmallVector<unsigned, 2> UsedOpsInCopy;
    // Track the registers defined by the COPY.
2024    SmallVector<unsigned, 2> DefedRegsInCopy;
2025
2026    // We must sink this DBG_VALUE if its operand is sunk. To avoid searching
2027    // for DBG_VALUEs later, record them when they're encountered.
2028    if (MI.isDebugValue() && !MI.isDebugRef()) {
2029      SmallDenseMap<MCRegister, SmallVector<unsigned, 2>, 4> MIUnits;
2030      bool IsValid = true;
2031      for (MachineOperand &MO : MI.debug_operands()) {
2032        if (MO.isReg() && MO.getReg().isPhysical()) {
2033          // Bail if we can already tell the sink would be rejected, rather
2034          // than needlessly accumulating lots of DBG_VALUEs.
2035          if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
2036                                    ModifiedRegUnits, UsedRegUnits)) {
2037            IsValid = false;
2038            break;
2039          }
2040
2041          // Record debug use of each reg unit.
2042          for (MCRegUnit Unit : TRI->regunits(MO.getReg()))
2043            MIUnits[Unit].push_back(MO.getReg());
2044        }
2045      }
2046      if (IsValid) {
2047        for (auto &RegOps : MIUnits)
2048          SeenDbgInstrs[RegOps.first].emplace_back(&MI,
2049                                                   std::move(RegOps.second));
2050      }
2051      continue;
2052    }
2053
2054    if (MI.isDebugOrPseudoInstr())
2055      continue;
2056
    // Do not move any instruction across a function call.
2058    if (MI.isCall())
2059      return false;
2060
2061    if (!MI.isCopy() || !MI.getOperand(0).isRenamable()) {
2062      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2063                                        TRI);
2064      continue;
2065    }
2066
2067    // Don't sink the COPY if it would violate a register dependency.
2068    if (hasRegisterDependency(&MI, UsedOpsInCopy, DefedRegsInCopy,
2069                              ModifiedRegUnits, UsedRegUnits)) {
2070      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2071                                        TRI);
2072      continue;
2073    }
2074    assert((!UsedOpsInCopy.empty() && !DefedRegsInCopy.empty()) &&
           "Unexpected SrcReg or DefReg");
2076    MachineBasicBlock *SuccBB =
2077        getSingleLiveInSuccBB(CurBB, SinkableBBs, DefedRegsInCopy, TRI);
2078    // Don't sink if we cannot find a single sinkable successor in which Reg
2079    // is live-in.
2080    if (!SuccBB) {
2081      LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
2082                                        TRI);
2083      continue;
2084    }
2085    assert((SuccBB->pred_size() == 1 && *SuccBB->pred_begin() == &CurBB) &&
2086           "Unexpected predecessor");
2087
2088    // Collect DBG_VALUEs that must sink with this copy. We've previously
    // recorded which reg units DBG_VALUEs read; if this instruction writes any
    // of those units, then the corresponding DBG_VALUEs must be sunk as well.
2091    MapVector<MachineInstr *, MIRegs::second_type> DbgValsToSinkMap;
2092    for (auto &MO : MI.all_defs()) {
2093      for (MCRegUnit Unit : TRI->regunits(MO.getReg())) {
2094        for (const auto &MIRegs : SeenDbgInstrs.lookup(Unit)) {
2095          auto &Regs = DbgValsToSinkMap[MIRegs.first];
2096          for (unsigned Reg : MIRegs.second)
2097            Regs.push_back(Reg);
2098        }
2099      }
2100    }
2101    auto DbgValsToSink = DbgValsToSinkMap.takeVector();
2102
2103    LLVM_DEBUG(dbgs() << "Sink instr " << MI << "\tinto block " << *SuccBB);
2104
2105    MachineBasicBlock::iterator InsertPos =
2106        SuccBB->SkipPHIsAndLabels(SuccBB->begin());
2107    if (blockPrologueInterferes(SuccBB, InsertPos, MI, TRI, TII, nullptr)) {
2108      LLVM_DEBUG(
2109          dbgs() << " *** Not sinking: prologue interference\n");
2110      continue;
2111    }
2112
2113    // Clear the kill flag if SrcReg is killed between MI and the end of the
2114    // block.
2115    clearKillFlags(&MI, CurBB, UsedOpsInCopy, UsedRegUnits, TRI);
2116    performSink(MI, *SuccBB, InsertPos, DbgValsToSink);
2117    updateLiveIn(&MI, SuccBB, UsedOpsInCopy, DefedRegsInCopy);
2118
2119    Changed = true;
2120    ++NumPostRACopySink;
2121  }
2122  return Changed;
2123}
2124
2125bool PostRAMachineSinking::runOnMachineFunction(MachineFunction &MF) {
2126  if (skipFunction(MF.getFunction()))
2127    return false;
2128
2129  bool Changed = false;
2130  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
2131  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
2132
2133  ModifiedRegUnits.init(*TRI);
2134  UsedRegUnits.init(*TRI);
2135  for (auto &BB : MF)
2136    Changed |= tryToSinkCopy(BB, MF, TRI, TII);
2137
2138  return Changed;
2139}
2140