//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

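// A rough sketch of the loop described above (illustrative pseudocode only;
// the authoritative implementation is ListScheduleTopDown below):
//
//   while (!AvailableQueue.empty() || !PendingQueue.empty()) {
//     move Pending nodes whose depth <= CurCycle into Available;
//     pop the highest-priority Available node with no hazard;
//     if (one was found)
//       emit it and release its successors toward Pending;
//     else
//       advance the cycle (a stall) or emit a noop;
//   }
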
#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;
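    // For example (illustrative numbers): if node A issues at cycle 0 with
    // latency 2, a successor whose only unscheduled predecessor is A waits
    // here until its depth (2) is <= CurCycle, at which point
    // ListScheduleTopDown moves it to the AvailableQueue.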

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
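    // Walking a block bottom-up, a use of register R records the current
    // instruction count here (R is live above that point); a def of R resets
    // the entry to ~0u (R is dead above the def). See FixupKills below.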

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
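
// For example (illustrative, X86): a stack adjustment such as
// "subl $12, %esp" modifies the stack pointer and so is treated as a
// boundary above; stack-slot loads and stores around it therefore never
// need explicit dependence edges on it.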

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for an anti-dependence breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL :
      TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
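    // For example, a block containing a label in the middle is split into
    // two regions that are list-scheduled independently; the label itself
    // is only passed to Observe() and is never reordered.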
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  delete HR;
  delete ADB;

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
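  // For example (illustrative, X86): if this operand is a kill of %EAX but
  // KillIndices shows %AX still live, an implicit def of %AX is added to MI
  // and the kill flag on %EAX stays cleared; the kill flag is restored below
  // only if every subreg turns out to be dead.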
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark the register and all of its subregs,
    // since they are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed here if it
        // first becomes live at this instruction (walking bottom-up).
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
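  // For example, if SU is scheduled at depth 3 and this edge has latency 2,
  // SuccSU's depth is raised to at least 5, so it cannot issue before cycle 5.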
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            dbgs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
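      // For example (illustrative): on a dual-issue target the exact
      // recognizer can accept two independent instructions in one cycle;
      // time then advances only when nothing can issue (the else branch
      // below) or, with the simple recognizer, after each instruction here.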
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}
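
// Typical use (illustrative; the actual wiring lives in the target-independent
// code generator's pass setup, not in this file):
//   PM.add(createPostRAScheduler(OptLevel));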