//===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a fast scheduler.
//
//===----------------------------------------------------------------------===//

#include "InstrEmitter.h"
#include "ScheduleDAGSDNodes.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical copies");

static RegisterScheduler
  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
                   createFastDAGScheduler);
static RegisterScheduler
  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                        createDAGLinearizer);
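
// Either scheduler can be selected from the command line via the pre-RA
// scheduling option, e.g. "llc -pre-RA-sched=fast" or
// "llc -pre-RA-sched=linearize".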

namespace {
  /// FastPriorityQueue - A degenerate priority queue that considers
  /// all nodes to have the same priority.
  ///
  struct FastPriorityQueue {
    SmallVector<SUnit *, 16> Queue;

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push_back(U);
    }

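    // Note that pop() is LIFO: the most recently released SUnit is returned
    // first, so this "queue" actually behaves as a stack.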
    SUnit *pop() {
      if (empty()) return nullptr;
      SUnit *V = Queue.back();
      Queue.pop_back();
      return V;
    }
  };

//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
  /// AvailableQueue - The priority queue to use for the available SUnits.
  FastPriorityQueue AvailableQueue;

  /// LiveRegDefs - The set of physical registers that are currently "live",
  /// together with the SUnit defining each one. A defining node must be
  /// scheduled before any other node that modifies its register.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGFast(MachineFunction &mf)
    : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  /// AddPred - Adds a predecessor edge to SUnit SU.
  void AddPred(SUnit *SU, const SDep &D) {
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  void RemovePred(SUnit *SU, const SDep &D) {
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
  void ListScheduleBottomUp();

  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
  bool forceUnitLatencies() const override { return true; }
};
}  // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
  LLVM_DEBUG(dbgs() << "********** List Scheduling **********\n");

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  LLVM_DEBUG(dump());

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue.push(PredSU);
  }
}

void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors.
  for (SDep &Pred : SU->Preds) {
    ReleasePred(SU, &Pred);
    if (Pred.isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[Pred.getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
        LiveRegCycles[Pred.getReg()] = CurCycle;
      }
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  LLVM_DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  LLVM_DEBUG(dumpNode(*SU));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SDep &Succ : SU->Succs) {
    if (Succ.isAssignedRegDep()) {
      if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[Succ.getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[Succ.getReg()] = nullptr;
        LiveRegCycles[Succ.getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  if (N->getGluedNode())
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getSimpleValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (const SDValue &Op : N->op_values()) {
    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    LLVM_DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

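    // unfoldMemoryOperand returns two nodes: NewNodes[0] is the load that
    // was folded into the original operation and NewNodes[1] is the
    // remaining data-processing operation. Point all uses of the old node
    // at the new operation, and the old chain result at the load's chain.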
    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    NewSU = newSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with a different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = newSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
    }

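    // Re-wire the dependencies of the original SUnit: the chain pred and any
    // preds feeding the load address move to LoadSU, all other preds move to
    // NewSU, data successors are re-pointed at NewSU, and chain successors
    // at LoadSU.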
    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SDep &Pred : SU->Preds) {
      if (Pred.isCtrl())
        ChainPred = Pred;
      else if (Pred.getSUnit()->getNode() &&
               Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(Pred);
      else
        NodePreds.push_back(Pred);
    }
    for (SDep &Succ : SU->Succs) {
      if (Succ.isCtrl())
        ChainSuccs.push_back(Succ);
      else
        NodeSuccs.push_back(Succ);
    }

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad) {
        AddPred(LoadSU, Pred);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad) {
      SDep D(LoadSU, SDep::Barrier);
      D.setLatency(LoadSU->Latency);
      AddPred(NewSU, D);
    }

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  LLVM_DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
  NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SDep &Pred : SU->Preds)
    if (!Pred.isArtificial())
      AddPred(NewSU, Pred);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SDep &Succ : SU->Succs) {
    if (Succ.isArtificial())
      continue;
    SUnit *SuccSU = Succ.getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = Succ;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SDep &Succ : SU->Succs) {
    if (Succ.isArtificial())
      continue;
    SUnit *SuccSU = Succ.getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = Succ;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, Succ));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    RemovePred(DelDeps[i].first, DelDeps[i].second);
  }
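  // Chain the copies together: CopyFromSU reads the value out of the
  // physical register Reg defined by SU (into the cross-copy class DestRC),
  // and CopyToSU copies it back into the original class SrcRC. The
  // already-scheduled successors were re-pointed at CopyToSU above.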
  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  unsigned NumRes;
  if (N->getOpcode() == ISD::CopyFromReg) {
    // CopyFromReg has: "chain, Val, glue" so Val is result #1.
    NumRes = 1;
  } else {
    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
    NumRes = MCID.getNumDefs();
    for (const MCPhysReg *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
      if (Reg == *ImpDef)
        break;
      ++NumRes;
    }
  }
  return N->getSimpleValueType(NumRes);
}

/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
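  // Check Reg and every register aliasing it (MCRegAliasIterator with
  // IncludeSelf=true) against the currently live defs; a clash with a def
  // from a different SUnit means SU must be delayed.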
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
    if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
      if (RegAdded.insert(*AI).second) {
        LRegs.push_back(*AI);
        Added = true;
      }
    }
  }
  return Added;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
                                               SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SDep &Pred : SU->Preds) {
    if (Pred.isAssignedRegDep()) {
      CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM ||
        Node->getOpcode() == ISD::INLINEASM_BR) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

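      // Inline asm operands are grouped: each group starts with a flag
      // word that encodes the operand kind and the number of register
      // operands that follow it.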
      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (Register::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }
    if (!Node->isMachineOpcode())
      continue;
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const MCPhysReg *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
    }
  }
  return !LRegs.empty();
}


/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue.push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back; otherwise schedule it.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue.pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue.pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try code duplication or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      // Try duplicating the node that produces the "expensive to copy"
      // value to break the dependency. In case even that doesn't work,
      // insert cross class copies.
      SUnit *TrySU = NotReady[0];
      SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, VT);
      const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

      // If the cross copy register class is the same as RC, then it must
      // be possible to copy the value directly. Do not try to duplicate
      // the def.
      // If the cross copy register class is not the same as RC, then it's
      // possible to copy the value but it requires cross register class
      // copies and it is expensive.
      // If the cross copy register class is null, then it's not possible
      // to copy the value at all.
      SUnit *NewDef = nullptr;
      if (DestRC != RC) {
        NewDef = CopyAndMoveSuccessors(LRDef);
        if (!DestRC && !NewDef)
          report_fatal_error("Can't handle live physical "
                             "register dependency!");
      }
      if (!NewDef) {
        // Issue copies, these can be expensive cross register class copies.
        SmallVector<SUnit*, 2> Copies;
        InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
        LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
                          << " to SU #" << Copies.front()->NodeNum << "\n");
        AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
        NewDef = Copies.back();
      }

      LLVM_DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
                        << " to SU #" << TrySU->NodeNum << "\n");
      LiveRegDefs[Reg] = NewDef;
      AddPred(NewDef, SDep(TrySU, SDep::Artificial));
      TrySU->isAvailable = false;
      CurSU = NewDef;
      assert(CurSU && "Unable to resolve live physical register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue.push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}


namespace {
//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - A no-scheduling "scheduler" that simply linearizes
// the DAG in topological order.
// IMPORTANT: this may not work for targets with physreg dependencies.
//
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  MachineBasicBlock *
    EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;

private:
  std::vector<SDNode*> Sequence;
  DenseMap<SDNode*, SDNode*> GluedMap;  // Cache glue to its user

  void ScheduleNode(SDNode *N);
};
} // end anonymous namespace

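// The linearizer reuses each node's NodeId field as a degree count (the
// number of unscheduled users): a node is appended to Sequence as soon as
// its degree reaches zero, producing a root-first topological order that
// EmitSchedule later walks in reverse.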
void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
  if (N->getNodeId() != 0)
    llvm_unreachable(nullptr);

  if (!N->isMachineOpcode() &&
      (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
    // These nodes do not need to be translated into MIs.
    return;

  LLVM_DEBUG(dbgs() << "\n*** Scheduling: ");
  LLVM_DEBUG(N->dump(DAG));
  Sequence.push_back(N);

  unsigned NumOps = N->getNumOperands();
  if (unsigned NumLeft = NumOps) {
    SDNode *GluedOpN = nullptr;
    do {
      const SDValue &Op = N->getOperand(NumLeft-1);
      SDNode *OpN = Op.getNode();

      if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
        // Schedule glue operand right above N.
        GluedOpN = OpN;
        assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
        OpN->setNodeId(0);
        ScheduleNode(OpN);
        continue;
      }

      if (OpN == GluedOpN)
        // Glue operand is already scheduled.
        continue;

      DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
      if (DI != GluedMap.end() && DI->second != N)
        // Users of glues are counted against the glued users.
        OpN = DI->second;

      unsigned Degree = OpN->getNodeId();
      assert(Degree > 0 && "Predecessor over-released!");
      OpN->setNodeId(--Degree);
      if (Degree == 0)
        ScheduleNode(OpN);
    } while (--NumLeft);
  }
}

/// findGluedUser - Find the representative use of a glue value by walking
/// the use chain.
static SDNode *findGluedUser(SDNode *N) {
  while (SDNode *Glued = N->getGluedUser())
    N = Glued;
  return N;
}

void ScheduleDAGLinearize::Schedule() {
  LLVM_DEBUG(dbgs() << "********** DAG Linearization **********\n");

  SmallVector<SDNode*, 8> Glues;
  unsigned DAGSize = 0;
  for (SDNode &Node : DAG->allnodes()) {
    SDNode *N = &Node;

    // Use node id to record degree.
    unsigned Degree = N->use_size();
    N->setNodeId(Degree);
    unsigned NumVals = N->getNumValues();
    if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
        N->hasAnyUseOfValue(NumVals-1)) {
      SDNode *User = findGluedUser(N);
      if (User) {
        Glues.push_back(N);
        GluedMap.insert(std::make_pair(N, User));
      }
    }

    if (N->isMachineOpcode() ||
        (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
      ++DAGSize;
  }

  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
    SDNode *Glue = Glues[i];
    SDNode *GUser = GluedMap[Glue];
    unsigned Degree = Glue->getNodeId();
    unsigned UDegree = GUser->getNodeId();

    // Glue user must be scheduled together with the glue operand. So other
    // users of the glue operand must be treated as its users.
    SDNode *ImmGUser = Glue->getGluedUser();
    for (const SDNode *U : Glue->uses())
      if (U == ImmGUser)
        --Degree;
    GUser->setNodeId(UDegree + Degree);
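    // Give the glue node itself a degree of 1: it must be released only by
    // its glued user, which schedules it immediately above that user.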
    Glue->setNodeId(1);
  }

  Sequence.reserve(DAGSize);
  ScheduleNode(DAG->getRoot().getNode());
}

MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;

  LLVM_DEBUG({ dbgs() << "\n*** Final schedule ***\n"; });

  unsigned NumNodes = Sequence.size();
  MachineBasicBlock *BB = Emitter.getBlock();
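  // Sequence was built root-first, so walk it backwards to emit the
  // instructions in execution order.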
  for (unsigned i = 0; i != NumNodes; ++i) {
    SDNode *N = Sequence[NumNodes-i-1];
    LLVM_DEBUG(N->dump(DAG));
    Emitter.EmitNode(N, false, false, VRBaseMap);

    // Emit any debug values associated with the node.
    if (N->getHasDebugValue()) {
      MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
      for (auto DV : DAG->GetDbgValues(N)) {
        if (!DV->isEmitted())
          if (auto *DbgMI = Emitter.EmitDbgValue(DV, VRBaseMap))
            BB->insert(InsertPos, DbgMI);
      }
    }
  }

  LLVM_DEBUG(dbgs() << '\n');

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGFast(*IS->MF);
}

llvm::ScheduleDAGSDNodes *
llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGLinearize(*IS->MF);
}