1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "regalloc"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "llvm/Value.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineDominators.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
26#include "llvm/CodeGen/Passes.h"
27#include "llvm/Target/TargetRegisterInfo.h"
28#include "llvm/Target/TargetInstrInfo.h"
29#include "llvm/Target/TargetMachine.h"
30#include "llvm/Support/CommandLine.h"
31#include "llvm/Support/Debug.h"
32#include "llvm/Support/ErrorHandling.h"
33#include "llvm/Support/raw_ostream.h"
34#include "llvm/ADT/DenseSet.h"
35#include "llvm/ADT/STLExtras.h"
36#include "LiveRangeCalc.h"
37#include "VirtRegMap.h"
38#include <algorithm>
39#include <limits>
40#include <cmath>
41using namespace llvm;
42
43// Switch to the new experimental algorithm for computing live intervals.
44static cl::opt<bool>
45NewLiveIntervals("new-live-intervals", cl::Hidden,
46                 cl::desc("Use new algorithm forcomputing live intervals"));
47
// Static pass ID; its address identifies this pass to the pass framework.
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
// Register the pass and declare the analyses it depends on so the pass
// manager can schedule them first.
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
58
// Declare required and preserved analyses. This pass does not modify the CFG.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  // LiveVariables is requested unconditionally even though the new
  // -new-live-intervals path does not use it (see runOnMachineFunction).
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
72
// LRCalc is allocated lazily in runOnMachineFunction; start it (and the
// dominator tree pointer) as null.
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
  DomTree(0), LRCalc(0) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
77
LiveIntervals::~LiveIntervals() {
  // LRCalc is lazily allocated in runOnMachineFunction and owned here.
  delete LRCalc;
}
81
82void LiveIntervals::releaseMemory() {
83  // Free the live intervals themselves.
84  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
85    delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
86  VirtRegIntervals.clear();
87  RegMaskSlots.clear();
88  RegMaskBits.clear();
89  RegMaskBlocks.clear();
90
91  for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
92    delete RegUnitIntervals[i];
93  RegUnitIntervals.clear();
94
95  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
96  VNInfoAllocator.Reset();
97}
98
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache pointers to the function and all analyses this pass consumes.
  MF = &fn;
  MRI = &MF->getRegInfo();
  TM = &fn.getTarget();
  TRI = TM->getRegisterInfo();
  TII = TM->getInstrInfo();
  AA = &getAnalysis<AliasAnalysis>();
  LV = &getAnalysis<LiveVariables>();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  // LRCalc is allocated lazily on first run and reused afterwards.
  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  if (NewLiveIntervals) {
    // This is the new way of computing live intervals.
    // It is independent of LiveVariables, and it can run at any time.
    computeVirtRegs();
    computeRegMasks();
  } else {
    // This is the old way of computing live intervals.
    // It depends on LiveVariables.
    computeIntervals();
  }
  // Both paths need the fixed regunit liveness for ABI blocks.
  computeLiveInRegUnits();

  DEBUG(dump());
  return true;
}
132
133/// print - Implement the dump method.
134void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
135  OS << "********** INTERVALS **********\n";
136
137  // Dump the regunits.
138  for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
139    if (LiveInterval *LI = RegUnitIntervals[i])
140      OS << PrintRegUnit(i, TRI) << " = " << *LI << '\n';
141
142  // Dump the virtregs.
143  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
144    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
145    if (hasInterval(Reg))
146      OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n';
147  }
148
149  printInstrs(OS);
150}
151
// Print the machine function with slot indexes attached to each instruction.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}
156
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print all machine instructions to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif
162
163static
164bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
165  unsigned Reg = MI.getOperand(MOIdx).getReg();
166  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
167    const MachineOperand &MO = MI.getOperand(i);
168    if (!MO.isReg())
169      continue;
170    if (MO.getReg() == Reg && MO.isDef()) {
171      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
172             MI.getOperand(MOIdx).getSubReg() &&
173             (MO.getSubReg() || MO.isImplicit()));
174      return true;
175    }
176  }
177  return false;
178}
179
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  // Full-register defs and early-clobber defs are never partial redefs.
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getRegSlot();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
  // NOTE(review): OldLR is dereferenced without a null check, so this relies
  // on the interval being live at the early register slot of the redef --
  // confirm all callers uphold that precondition.
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0) {
    // It is a partial redef only if the previous value's defining
    // instruction also defines this register.
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}
197
/// handleVirtualRegisterDef - Update `interval` for the def of a virtual
/// register by operand MOIdx of instruction mi (at slot MIIdx) in block mbb.
/// Uses LiveVariables kill/alive information to build the live ranges; this
/// is part of the old (non -new-live-intervals) computation path.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = LV->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

    // Make sure the first definition is not a partial redefinition.
    assert(!MO.readsReg() && "First def cannot also read virtual register "
           "missing <undef> flag?");

    VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
      else
        // The defining instruction is also the kill: the value is dead.
        killIdx = defIndex.getDeadSlot();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = LV->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = MF->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, VNInfoAllocator);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getRegSlot();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
                                    OldValNo));

      DEBUG(dbgs() << " RESULT: " << interval);
    } else if (LV->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getRegSlot();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getRegSlot(true);

      VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
369
370void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
371                                      MachineBasicBlock::iterator MI,
372                                      SlotIndex MIIdx,
373                                      MachineOperand& MO,
374                                      unsigned MOIdx) {
375  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
376    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
377                             getOrCreateInterval(MO.getReg()));
378}
379
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: " << MF->getName() << '\n');

  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Registers whose only visible uses are <undef>; they get empty intervals.
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Record where this block's regmask slots begin.
    RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();

    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = Indexes->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // Debug values carry no slot index, so don't advance MIIndex for them.
      if (MI->isDebugValue())
        continue;
      assert(Indexes->getInstructionFromIndex(MIIndex) == MI &&
             "Lost SlotIndex synchronization");

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);

        // Collect register masks.
        if (MO.isRegMask()) {
          RegMaskSlots.push_back(MIIndex.getRegSlot());
          RegMaskBits.push_back(MO.getRegMask());
          continue;
        }

        if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = Indexes->getNextNonNullIndex(MIIndex);
    }

    // Compute the number of register mask instructions in this block.
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
    RMB.second = RegMaskSlots.size() - RMB.first;
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
454
455LiveInterval* LiveIntervals::createInterval(unsigned reg) {
456  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
457  return new LiveInterval(reg, Weight);
458}
459
460
/// computeVirtRegInterval - Compute the live interval of a virtual register,
/// based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval *LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI->empty() && "Should only compute empty intervals.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  // First seed a dead def at every def slot, then extend each value to
  // reach all of its uses.
  LRCalc->createDeadDefs(LI);
  LRCalc->extendToUses(LI);
}
470
471void LiveIntervals::computeVirtRegs() {
472  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
473    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
474    if (MRI->reg_nodbg_empty(Reg))
475      continue;
476    LiveInterval *LI = createInterval(Reg);
477    VirtRegIntervals[Reg] = LI;
478    computeVirtRegInterval(LI);
479  }
480}
481
482void LiveIntervals::computeRegMasks() {
483  RegMaskBlocks.resize(MF->getNumBlockIDs());
484
485  // Find all instructions with regmask operands.
486  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
487       MBBI != E; ++MBBI) {
488    MachineBasicBlock *MBB = MBBI;
489    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
490    RMB.first = RegMaskSlots.size();
491    for (MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end();
492         MI != ME; ++MI)
493      for (MIOperands MO(MI); MO.isValid(); ++MO) {
494        if (!MO->isRegMask())
495          continue;
496          RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
497          RegMaskBits.push_back(MO->getRegMask());
498      }
499    // Compute the number of register mask instructions in this block.
500    RMB.second = RegMaskSlots.size() - RMB.first;
501  }
502}
503
504//===----------------------------------------------------------------------===//
505//                           Register Unit Liveness
506//===----------------------------------------------------------------------===//
507//
508// Fixed interference typically comes from ABI boundaries: Function arguments
509// and return values are passed in fixed registers, and so are exception
510// pointers entering landing pads. Certain instructions require values to be
511// present in specific registers. That is also represented through fixed
512// interference.
513//
514
/// computeRegUnitInterval - Compute the live interval of a register unit, based
/// on the uses and defs of aliasing registers.  The interval should be empty,
/// or contain only dead phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) {
  unsigned Unit = LI->reg;

  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
    unsigned Root = *Roots;
    // Skip registers with no uses or defs at all.
    if (!MRI->reg_empty(Root))
      LRCalc->createDeadDefs(LI, Root);
    for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
      if (!MRI->reg_empty(*Supers))
        LRCalc->createDeadDefs(LI, *Supers);
    }
  }

  // Now extend LI to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
    unsigned Root = *Roots;
    if (!MRI->isReserved(Root) && !MRI->reg_empty(Root))
      LRCalc->extendToUses(LI, Root);
    for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
      unsigned Reg = *Supers;
      if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
        LRCalc->extendToUses(LI, Reg);
    }
  }
}
552
553
/// computeLiveInRegUnits - Precompute the live ranges of any register units
/// that are live-in to an ABI block somewhere. Register values can appear
/// without a corresponding def when entering the entry block or a landing pad.
///
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitIntervals.resize(TRI->getNumRegUnits());
  DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the intervals allocated.
  SmallVector<LiveInterval*, 8> NewIntvs;

  // Check all basic blocks for live-ins.
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock *MBB = MFI;

    // We only care about ABI blocks: Entry + landing pads.
    if ((MFI != MF->begin() && !MBB->isLandingPad()) || MBB->livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
    DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
    for (MachineBasicBlock::livein_iterator LII = MBB->livein_begin(),
         LIE = MBB->livein_end(); LII != LIE; ++LII) {
      for (MCRegUnitIterator Units(*LII, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveInterval *Intv = RegUnitIntervals[Unit];
        // Allocate the regunit interval lazily on first sighting.
        if (!Intv) {
          Intv = RegUnitIntervals[Unit] = new LiveInterval(Unit, HUGE_VALF);
          NewIntvs.push_back(Intv);
        }
        VNInfo *VNI = Intv->createDeadDef(Begin, getVNInfoAllocator());
        // Silence the unused-variable warning when DEBUG compiles to nothing.
        (void)VNI;
        DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    DEBUG(dbgs() << '\n');
  }
  DEBUG(dbgs() << "Created " << NewIntvs.size() << " new intervals.\n");

  // Compute the 'normal' part of the intervals.
  for (unsigned i = 0, e = NewIntvs.size(); i != e; ++i)
    computeRegUnitInterval(NewIntvs[i]);
}
599
600
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
/// Returns true when removing dead PHI values may have split the interval
/// into unconnected components.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveRangeQuery LRQ(*li, Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                    << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    // Seed each value with a minimal [def, dead-slot) segment; the worklist
    // loop below grows these segments to cover the remaining uses.
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    // A range still ending at the dead slot was never extended: the value
    // has no remaining uses.
    if (LII->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
731
732void LiveIntervals::extendToIndices(LiveInterval *LI,
733                                    ArrayRef<SlotIndex> Indices) {
734  assert(LRCalc && "LRCalc not initialized.");
735  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
736  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
737    LRCalc->extend(LI, Indices[i]);
738}
739
/// pruneValue - Remove the portion of LI's value live at Kill from Kill
/// onwards, following the live range through successor blocks. End points of
/// the removed segments are appended to EndPoints when it is non-null.
void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveRangeQuery LRQ(*LI, Kill);
  VNInfo *VNI = LRQ.valueOut();
  // Nothing live at Kill; nothing to prune.
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBStart, MBBEnd;
  tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LI->removeRange(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LI->removeRange(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
  VisitedTy Visited;
  for (MachineBasicBlock::succ_iterator
       SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
       SuccI != SuccE; ++SuccI) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveRangeQuery LRQ(*LI, MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI live range. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LI->removeRange(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LI->removeRange(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}
799
800//===----------------------------------------------------------------------===//
801// Register allocator hooks.
802//
803
/// addKillFlags - Add a kill flag to the instruction ending each live range
/// segment of every virtual register, unless the assigned physical register's
/// regunits are still live at that point (in which case any stale kill flag
/// is removed instead).
void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<LiveInterval*, LiveInterval::iterator>, 8> RU;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    LiveInterval *LI = &getInterval(Reg);
    if (LI->empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
         ++Units) {
      LiveInterval *RUInt = &getRegUnit(*Units);
      if (RUInt->empty())
        continue;
      // Start each regunit iterator at the first segment that could overlap
      // LI; the iterators only advance forward below.
      RU.push_back(std::make_pair(RUInt, RUInt->find(LI->begin()->end)));
    }

    // Every instruction that kills Reg corresponds to a live range end point.
    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %EAX = COPY %vreg5
      //   FOO %vreg5         <--- MI, cancel kill because %EAX is live.
      //   BAR %EAX<kill>
      //
      // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
      bool CancelKill = false;
      for (unsigned u = 0, e = RU.size(); u != e; ++u) {
        LiveInterval *RInt = RU[u].first;
        LiveInterval::iterator &I = RU[u].second;
        if (I == RInt->end())
          continue;
        I = RInt->advanceTo(I, RI->end);
        if (I == RInt->end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        CancelKill = true;
        break;
      }
      if (CancelKill)
        MI->clearRegisterKills(Reg, NULL);
      else
        MI->addRegisterKilled(Reg, NULL);
    }
  }
}
865
866MachineBasicBlock*
867LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
868  // A local live range must be fully contained inside the block, meaning it is
869  // defined and killed at instructions, not at block boundaries. It is not
870  // live in or or out of any block.
871  //
872  // It is technically possible to have a PHI-defined live range identical to a
873  // single block, but we are going to return false in that case.
874
875  SlotIndex Start = LI.beginIndex();
876  if (Start.isBlock())
877    return NULL;
878
879  SlotIndex Stop = LI.endIndex();
880  if (Stop.isBlock())
881    return NULL;
882
883  // getMBBFromIndex doesn't need to search the MBB table when both indexes
884  // belong to proper instructions.
885  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
886  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
887  return MBB1 == MBB2 ? MBB1 : NULL;
888}
889
890bool
891LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
892  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
893       I != E; ++I) {
894    const VNInfo *PHI = *I;
895    if (PHI->isUnused() || !PHI->isPHIDef())
896      continue;
897    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
898    // Conservatively return true instead of scanning huge predecessor lists.
899    if (PHIMBB->pred_size() > 100)
900      return true;
901    for (MachineBasicBlock::const_pred_iterator
902         PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
903      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
904        return true;
905  }
906  return false;
907}
908
909float
910LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
911  // Limit the loop depth ridiculousness.
912  if (loopDepth > 200)
913    loopDepth = 200;
914
915  // The loop depth is used to roughly estimate the number of times the
916  // instruction is executed. Something like 10^d is simple, but will quickly
917  // overflow a float. This expression behaves like 10^d for small d, but is
918  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
919  // headroom before overflow.
920  // By the way, powf() might be unavailable here. For consistency,
921  // We may take pow(double,double).
922  float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
923
924  return (isDef + isUse) * lc;
925}
926
927LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
928                                                  MachineInstr* startInst) {
929  LiveInterval& Interval = getOrCreateInterval(reg);
930  VNInfo* VN = Interval.getNextValue(
931    SlotIndex(getInstructionIndex(startInst).getRegSlot()),
932    getVNInfoAllocator());
933  LiveRange LR(
934     SlotIndex(getInstructionIndex(startInst).getRegSlot()),
935     getMBBEndIdx(startInst->getParent()), VN);
936  Interval.addRange(LR);
937
938  return LR;
939}
940
941
942//===----------------------------------------------------------------------===//
943//                          Register mask functions
944//===----------------------------------------------------------------------===//
945
/// checkRegMaskInterference - Check whether any register mask slot (e.g. a
/// call site) overlaps LI. On the first overlap, UsableRegs is initialized
/// to all ones; every overlapping mask then clears the registers it
/// clobbers. Returns true iff at least one overlap was found (UsableRegs is
/// left untouched otherwise).
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
    std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  // Advance SlotI and LiveI in lock-step until one of them runs out.
  bool Found = false;
  for (;;) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}
1000
1001//===----------------------------------------------------------------------===//
1002//                         IntervalUpdate class.
1003//===----------------------------------------------------------------------===//
1004
// HMEditor is a toolkit used by handleMove to trim or extend live intervals
// when an instruction is moved to a new SlotIndex, or moved into a bundle.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  SlotIndex NewIdx;  // Destination index of the instruction being moved.

  typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
  typedef DenseSet<IntRangePair> RangeSet;

  // Per-register bookkeeping of the ranges a bundle touches. At most one
  // range of each kind is tracked per register.
  struct RegRanges {
    LiveRange* Use;   // Range entering the bundle (register is read).
    LiveRange* EC;    // Internal range whose end is not a dead slot
                      // (presumably an early clobber — see the exiting-EC
                      // assert in moveExitingFromInto).
    LiveRange* Dead;  // Internal dead-def range.
    LiveRange* Def;   // Range defined in the bundle that exits it.
    RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
  };
  typedef DenseMap<unsigned, RegRanges> BundleRanges;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI, SlotIndex NewIdx)
    : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}

  // Update intervals for all operands of MI from OldIdx to NewIdx.
  // This assumes that MI used to be at OldIdx, and now resides at
  // NewIdx.
  void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
    assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");

    // Collect the operands.
    RangeSet Entering, Internal, Exiting;
    bool hasRegMaskOp = false;
    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);

    // To keep the LiveRanges valid within an interval, move the ranges closest
    // to the destination first. This prevents ranges from overlapping, so that
    // APIs like removeRange still work.
    if (NewIdx < OldIdx) {
      // Moving up: fix entering ranges first (they end nearest NewIdx).
      moveAllEnteringFrom(OldIdx, Entering);
      moveAllInternalFrom(OldIdx, Internal);
      moveAllExitingFrom(OldIdx, Exiting);
    }
    else {
      // Moving down: fix exiting ranges first.
      moveAllExitingFrom(OldIdx, Exiting);
      moveAllInternalFrom(OldIdx, Internal);
      moveAllEnteringFrom(OldIdx, Entering);
    }

    if (hasRegMaskOp)
      updateRegMaskSlots(OldIdx);

#ifndef NDEBUG
    // Re-check that every touched interval still has sorted, non-degenerate
    // ranges.
    LIValidator validator;
    validator = std::for_each(Entering.begin(), Entering.end(), validator);
    validator = std::for_each(Internal.begin(), Internal.end(), validator);
    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
    assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
#endif

  }

  // Update intervals for all operands of MI to refer to BundleStart's
  // SlotIndex.
  void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
    if (MI == BundleStart)
      return; // Bundling instr with itself - nothing to do.

    SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
    assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
           "SlotIndex <-> Instruction mapping broken for MI");

    // Collect all ranges already in the bundle.
    MachineBasicBlock::instr_iterator BII(BundleStart);
    RangeSet Entering, Internal, Exiting;
    bool hasRegMaskOp = false;
    collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
    assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
    for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
      if (&*BII == MI)
        continue;
      collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
      assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
    }

    BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);

    // Now collect MI's own ranges, relative to its old position.
    Entering.clear();
    Internal.clear();
    Exiting.clear();
    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
    assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");

    DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
    DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
    DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");

    moveAllEnteringFromInto(OldIdx, Entering, BR);
    moveAllInternalFromInto(OldIdx, Internal, BR);
    moveAllExitingFromInto(OldIdx, Exiting, BR);


#ifndef NDEBUG
    LIValidator validator;
    validator = std::for_each(Entering.begin(), Entering.end(), validator);
    validator = std::for_each(Internal.begin(), Internal.end(), validator);
    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
    assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
#endif
  }

private:

#ifndef NDEBUG
  // Debug-only functor: records any interval whose ranges are unsorted or
  // empty ([start,start)) as bogus. Each interval is checked at most once.
  class LIValidator {
  private:
    DenseSet<const LiveInterval*> Checked, Bogus;
  public:
    void operator()(const IntRangePair& P) {
      const LiveInterval* LI = P.first;
      if (Checked.count(LI))
        return;
      Checked.insert(LI);
      if (LI->empty())
        return;
      SlotIndex LastEnd = LI->begin()->start;
      for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
           LRI != LRE; ++LRI) {
        const LiveRange& LR = *LRI;
        // Ranges must be non-degenerate and non-overlapping in order.
        if (LastEnd > LR.start || LR.start >= LR.end)
          Bogus.insert(LI);
        LastEnd = LR.end;
      }
    }

    bool rangesOk() const {
      return Bogus.empty();
    }
  };
#endif

  // Collect IntRangePairs for all operands of MI that may need fixing.
  // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
  // maps).
  void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
                     RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
    hasRegMaskOp = false;
    for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
                                    MOE = MI->operands_end();
         MOI != MOE; ++MOI) {
      const MachineOperand& MO = *MOI;

      if (MO.isRegMask()) {
        hasRegMaskOp = true;
        continue;
      }

      if (!MO.isReg() || MO.getReg() == 0)
        continue;

      unsigned Reg = MO.getReg();

      // TODO: Currently we're skipping uses that are reserved or have no
      // interval, but we're not updating their kills. This should be
      // fixed.
      if (TargetRegisterInfo::isPhysicalRegister(Reg) && MRI.isReserved(Reg))
        continue;

      // Collect ranges for register units. These live ranges are computed on
      // demand, so just skip any that haven't been computed yet.
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
          if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
            collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx);
      } else {
        // Collect ranges for individual virtual registers.
        collectRanges(MO, &LIS.getInterval(Reg),
                      Entering, Internal, Exiting, OldIdx);
      }
    }
  }

  // Classify the range of LI touched by operand MO at OldIdx:
  //  - reads make the containing range an "entering" range;
  //  - defs that live past OldIdx's dead slot are "exiting", otherwise
  //    "internal".
  void collectRanges(const MachineOperand &MO, LiveInterval *LI,
                     RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting,
                     SlotIndex OldIdx) {
    if (MO.readsReg()) {
      LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
      if (LR != 0)
        Entering.insert(std::make_pair(LI, LR));
    }
    if (MO.isDef()) {
      LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
      assert(LR != 0 && "No live range for def?");
      if (LR->end > OldIdx.getDeadSlot())
        Exiting.insert(std::make_pair(LI, LR));
      else
        Internal.insert(std::make_pair(LI, LR));
    }
  }

  // Build the per-register BundleRanges table from the classified range sets
  // of the instructions already in the bundle.
  BundleRanges createBundleRanges(RangeSet& Entering,
                                  RangeSet& Internal,
                                  RangeSet& Exiting) {
    BundleRanges BR;

    for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
         EI != EE; ++EI) {
      LiveInterval* LI = EI->first;
      LiveRange* LR = EI->second;
      BR[LI->reg].Use = LR;
    }

    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II) {
      LiveInterval* LI = II->first;
      LiveRange* LR = II->second;
      if (LR->end.isDead()) {
        BR[LI->reg].Dead = LR;
      } else {
        BR[LI->reg].EC = LR;
      }
    }

    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI) {
      LiveInterval* LI = EI->first;
      LiveRange* LR = EI->second;
      BR[LI->reg].Def = LR;
    }

    return BR;
  }

  // Transfer the kill flag for reg from the instruction at OldIdx to the one
  // at newKillIdx, if the old instruction carried one.
  void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
    MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
    if (!OldKillMI->killsRegister(reg))
      return; // Bail out if we don't have kill flags on the old register.
    MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
    assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
    assert(!NewKillMI->killsRegister(reg) &&
           "New kill instr is already a kill.");
    OldKillMI->clearRegisterKills(reg, &TRI);
    NewKillMI->addRegisterKilled(reg, &TRI);
  }

  // Rewrite the RegMaskSlots entry for OldIdx to NewIdx, keeping the list
  // sorted (the move must not cross another regmask slot).
  void updateRegMaskSlots(SlotIndex OldIdx) {
    SmallVectorImpl<SlotIndex>::iterator RI =
      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                       OldIdx);
    assert(*RI == OldIdx && "No RegMask at OldIdx.");
    *RI = NewIdx;
    assert(*prior(RI) < *RI && *RI < *next(RI) &&
           "RegSlots out of order. Did you move one call across another?");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  // If no such use exists, NewIdx itself is returned.
  SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
    SlotIndex LastUse = NewIdx;

    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      // Virtual register: scan all non-debug uses via the use lists.
      for (MachineRegisterInfo::use_nodbg_iterator
             UI = MRI.use_nodbg_begin(Reg),
             UE = MRI.use_nodbg_end();
           UI != UE; UI.skipInstruction()) {
        const MachineInstr* MI = &*UI;
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot;
      }
    } else {
      // Physical register (regunit): there are no use lists, so walk the
      // instructions between NewIdx and OldIdx looking for an operand whose
      // register contains this regunit.
      MachineInstr* MI = LIS.getSlotIndexes()->getInstructionFromIndex(NewIdx);
      MachineBasicBlock::iterator MII(MI);
      ++MII;
      MachineBasicBlock* MBB = MI->getParent();
      for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){
        for (MachineInstr::mop_iterator MOI = MII->operands_begin(),
                                        MOE = MII->operands_end();
             MOI != MOE; ++MOI) {
          const MachineOperand& mop = *MOI;
          if (!mop.isReg() || mop.getReg() == 0 ||
              TargetRegisterInfo::isVirtualRegister(mop.getReg()))
            continue;

          if (TRI.hasRegUnit(mop.getReg(), Reg))
            LastUse = LIS.getInstructionIndex(MII);
        }
      }
    }
    return LastUse;
  }

  // Handle an entering range when the instruction moves up (NewIdx < OldIdx):
  // if the range ended at the old position, trim it back to the last
  // remaining use.
  void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    bool LiveThrough = LR->end > OldIdx.getRegSlot();
    if (LiveThrough)
      return;
    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
    if (LastUse != NewIdx)
      moveKillFlags(LI->reg, NewIdx, LastUse);
    LR->end = LastUse.getRegSlot(LR->end.isEarlyClobber());
  }

  // Handle an entering range when the instruction moves down
  // (NewIdx > OldIdx): extend the range to cover the new position.
  void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    // Extend the LiveRange if NewIdx is past the end.
    if (NewIdx > LR->end) {
      // Move kill flags if OldIdx was not originally the end
      // (otherwise LR->end points to an invalid slot).
      if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
        assert(LR->end > OldIdx && "LiveRange does not cover original slot");
        moveKillFlags(LI->reg, LR->end, NewIdx);
      }
      LR->end = NewIdx.getRegSlot(LR->end.isEarlyClobber());
    }
  }

  void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
    bool GoingUp = NewIdx < OldIdx;

    if (GoingUp) {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringUpFrom(OldIdx, *EI);
    } else {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringDownFrom(OldIdx, *EI);
    }
  }

  // Move a range that both starts and ends at OldIdx so that it starts and
  // ends at NewIdx instead, updating its value's def index.
  void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
           LR->end <= OldIdx.getDeadSlot() &&
           "Range should be internal to OldIdx.");
    LiveRange Tmp(*LR);
    Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
    Tmp.valno->def = Tmp.start;
    Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
    LI->removeRange(*LR);
    LI->addRange(Tmp);
  }

  void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II)
      moveInternalFrom(OldIdx, *II);
  }

  // Move the start (the def) of a range that exits OldIdx to NewIdx.
  void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveRange* LR = P.second;
    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
           "Range should start in OldIdx.");
    assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
    SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
    LR->start = NewStart;
    LR->valno->def = NewStart;
  }

  void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI)
      moveExitingFrom(OldIdx, *EI);
  }

  // Bundle variant of moveEnteringUpFrom: also records the range in BR.
  void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
                              BundleRanges& BR) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    bool LiveThrough = LR->end > OldIdx.getRegSlot();
    if (LiveThrough) {
      assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
             "Def in bundle should be def range.");
      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
             "If bundle has use for this reg it should be LR.");
      BR[LI->reg].Use = LR;
      return;
    }

    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
    moveKillFlags(LI->reg, OldIdx, LastUse);

    if (LR->start < NewIdx) {
      // Becoming a new entering range.
      assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
             "Bundle shouldn't be re-defining reg mid-range.");
      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
             "Bundle shouldn't have different use range for same reg.");
      LR->end = LastUse.getRegSlot();
      BR[LI->reg].Use = LR;
    } else {
      // Becoming a new Dead-def.
      assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
             "Live range starting at unexpected slot.");
      assert(BR[LI->reg].Def == LR && "Reg should have def range.");
      assert(BR[LI->reg].Dead == 0 &&
               "Can't have def and dead def of same reg in a bundle.");
      LR->end = LastUse.getDeadSlot();
      BR[LI->reg].Dead = BR[LI->reg].Def;
      BR[LI->reg].Def = 0;
    }
  }

  // Bundle variant of moveEnteringDownFrom: also records the range in BR.
  void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
                                BundleRanges& BR) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    if (NewIdx > LR->end) {
      // Range extended to bundle. Add to bundle uses.
      // Note: Currently adds kill flags to bundle start.
      assert(BR[LI->reg].Use == 0 &&
             "Bundle already has use range for reg.");
      moveKillFlags(LI->reg, LR->end, NewIdx);
      LR->end = NewIdx.getRegSlot();
      BR[LI->reg].Use = LR;
    } else {
      assert(BR[LI->reg].Use != 0 &&
             "Bundle should already have a use range for reg.");
    }
  }

  void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
                               BundleRanges& BR) {
    bool GoingUp = NewIdx < OldIdx;

    if (GoingUp) {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringUpFromInto(OldIdx, *EI, BR);
    } else {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringDownFromInto(OldIdx, *EI, BR);
    }
  }

  void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
                            BundleRanges& BR) {
    // TODO: Sane rules for moving ranges into bundles.
  }

  void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
                               BundleRanges& BR) {
    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II)
      moveInternalFromInto(OldIdx, *II, BR);
  }

  // Merge a range that exits MI's old position into the bundle, either as the
  // bundle's exiting def or as an internal dead def.
  void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
                           BundleRanges& BR) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;

    assert(LR->start.isRegister() &&
           "Don't know how to merge exiting ECs into bundles yet.");

    if (LR->end > NewIdx.getDeadSlot()) {
      // This range is becoming an exiting range on the bundle.
      // If there was an old dead-def of this reg, delete it.
      if (BR[LI->reg].Dead != 0) {
        LI->removeRange(*BR[LI->reg].Dead);
        BR[LI->reg].Dead = 0;
      }
      assert(BR[LI->reg].Def == 0 &&
             "Can't have two defs for the same variable exiting a bundle.");
      LR->start = NewIdx.getRegSlot();
      LR->valno->def = LR->start;
      BR[LI->reg].Def = LR;
    } else {
      // This range is becoming internal to the bundle.
      assert(LR->end == NewIdx.getRegSlot() &&
             "Can't bundle def whose kill is before the bundle");
      if (BR[LI->reg].Dead || BR[LI->reg].Def) {
        // Already have a def for this. Just delete range.
        LI->removeRange(*LR);
      } else {
        // Make range dead, record.
        LR->end = NewIdx.getDeadSlot();
        BR[LI->reg].Dead = LR;
        assert(BR[LI->reg].Use == LR &&
               "Range becoming dead should currently be use.");
      }
      // In both cases the range is no longer a use on the bundle.
      BR[LI->reg].Use = 0;
    }
  }

  void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
                              BundleRanges& BR) {
    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI)
      moveExitingFromInto(OldIdx, *EI, BR);
  }

};
1504
1505void LiveIntervals::handleMove(MachineInstr* MI) {
1506  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1507  Indexes->removeMachineInstrFromMaps(MI);
1508  SlotIndex NewIndex = MI->isInsideBundle() ?
1509                        Indexes->getInstructionIndex(MI) :
1510                        Indexes->insertMachineInstrInMaps(MI);
1511  assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
1512         OldIndex < getMBBEndIdx(MI->getParent()) &&
1513         "Cannot handle moves across basic block boundaries.");
1514  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
1515
1516  HMEditor HME(*this, *MRI, *TRI, NewIndex);
1517  HME.moveAllRangesFrom(MI, OldIndex);
1518}
1519
1520void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
1521                                         MachineInstr* BundleStart) {
1522  SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
1523  HMEditor HME(*this, *MRI, *TRI, NewIndex);
1524  HME.moveAllRangesInto(MI, BundleStart);
1525}
1526