// LiveIntervalAnalysis.cpp revision 239462
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "LiveRangeCalc.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
41
42// Switch to the new experimental algorithm for computing live intervals.
43static cl::opt<bool>
44NewLiveIntervals("new-live-intervals", cl::Hidden,
45                 cl::desc("Use new algorithm forcomputing live intervals"));
46
// Pass identification and registration. The INITIALIZE_* macros register the
// pass with the PassRegistry and declare the analyses it depends on.
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
57
/// getAnalysisUsage - Declare required and preserved analyses. The pass does
/// not modify the CFG and keeps alias info, LiveVariables, loop info,
/// dominators and slot indexes valid.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  // Dominators and slot indexes are needed transitively: later users of
  // LiveIntervals rely on them still being available.
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
71
/// Constructor - DomTree and LRCalc are filled in lazily by
/// runOnMachineFunction; register the pass implementation here.
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
  DomTree(0), LRCalc(0) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
76
/// Destructor - LRCalc is allocated on first use in runOnMachineFunction and
/// owned by this pass.
LiveIntervals::~LiveIntervals() {
  delete LRCalc;
}
80
81void LiveIntervals::releaseMemory() {
82  // Free the live intervals themselves.
83  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
84    delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
85  VirtRegIntervals.clear();
86  RegMaskSlots.clear();
87  RegMaskBits.clear();
88  RegMaskBlocks.clear();
89
90  for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
91    delete RegUnitIntervals[i];
92  RegUnitIntervals.clear();
93
94  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
95  VNInfoAllocator.Reset();
96}
97
/// runOnMachineFunction - Compute live intervals for all virtual registers
/// and live-in register units of the given function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache function-level objects and required analyses.
  MF = &fn;
  MRI = &MF->getRegInfo();
  TM = &fn.getTarget();
  TRI = TM->getRegisterInfo();
  TII = TM->getInstrInfo();
  AA = &getAnalysis<AliasAnalysis>();
  LV = &getAnalysis<LiveVariables>();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  // LRCalc is allocated lazily on the first run and reused afterwards.
  if (!LRCalc)
    LRCalc = new LiveRangeCalc();
  AllocatableRegs = TRI->getAllocatableSet(fn);
  ReservedRegs = TRI->getReservedRegs(fn);

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  if (NewLiveIntervals) {
    // This is the new way of computing live intervals.
    // It is independent of LiveVariables, and it can run at any time.
    computeVirtRegs();
    computeRegMasks();
  } else {
    // This is the old way of computing live intervals.
    // It depends on LiveVariables.
    computeIntervals();
  }
  computeLiveInRegUnits();

  DEBUG(dump());
  return true;
}
133
134/// print - Implement the dump method.
135void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
136  OS << "********** INTERVALS **********\n";
137
138  // Dump the regunits.
139  for (unsigned i = 0, e = RegUnitIntervals.size(); i != e; ++i)
140    if (LiveInterval *LI = RegUnitIntervals[i])
141      OS << PrintRegUnit(i, TRI) << " = " << *LI << '\n';
142
143  // Dump the virtregs.
144  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
145    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
146    if (hasInterval(Reg))
147      OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n';
148  }
149
150  printInstrs(OS);
151}
152
/// printInstrs - Print all machine instructions annotated with slot indexes.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}
157
/// dumpInstrs - Debug helper: print the instructions to the debug stream.
void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
161
162static
163bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
164  unsigned Reg = MI.getOperand(MOIdx).getReg();
165  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
166    const MachineOperand &MO = MI.getOperand(i);
167    if (!MO.isReg())
168      continue;
169    if (MO.getReg() == Reg && MO.isDef()) {
170      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
171             MI.getOperand(MOIdx).getSubReg() &&
172             (MO.getSubReg() || MO.isImplicit()));
173      return true;
174    }
175  }
176  return false;
177}
178
179/// isPartialRedef - Return true if the specified def at the specific index is
180/// partially re-defining the specified live interval. A common case of this is
181/// a definition of the sub-register.
182bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
183                                   LiveInterval &interval) {
184  if (!MO.getSubReg() || MO.isEarlyClobber())
185    return false;
186
187  SlotIndex RedefIndex = MIIdx.getRegSlot();
188  const LiveRange *OldLR =
189    interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
190  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
191  if (DefMI != 0) {
192    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
193  }
194  return false;
195}
196
/// handleVirtualRegisterDef - Extend the live interval for the virtual
/// register defined by operand MOIdx of instruction mi at index MIIdx.
/// Handles the first def of a vreg as well as redefs coming from two-address
/// elimination, partial (sub-register) redefs, and PHI joins, using the kill
/// information collected by LiveVariables.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = LV->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

    // Make sure the first definition is not a partial redefinition.
    assert(!MO.readsReg() && "First def cannot also read virtual register "
           "missing <undef> flag?");

    VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
      else
        // The def is its own kill: the value is dead at the def slot.
        killIdx = defIndex.getDeadSlot();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = LV->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
               E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = MF->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();

      // Create interval with one of a NEW value number.  Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, VNInfoAllocator);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.

    // It may also be partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber());

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getRegSlot(true));
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getRegSlot();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(),
                                    OldValNo));

      DEBUG(dbgs() << " RESULT: " << interval);
    } else if (LV->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getRegSlot();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getRegSlot(true);

      VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
368
369void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
370                                      MachineBasicBlock::iterator MI,
371                                      SlotIndex MIIdx,
372                                      MachineOperand& MO,
373                                      unsigned MOIdx) {
374  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
375    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
376                             getOrCreateInterval(MO.getReg()));
377}
378
379/// computeIntervals - computes the live intervals for virtual
380/// registers. for some ordering of the machine instructions [1,N] a
381/// live interval is an interval [i, j) where 1 <= i <= j < N for
382/// which a variable is live
383void LiveIntervals::computeIntervals() {
384  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
385               << "********** Function: "
386               << ((Value*)MF->getFunction())->getName() << '\n');
387
388  RegMaskBlocks.resize(MF->getNumBlockIDs());
389
390  SmallVector<unsigned, 8> UndefUses;
391  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
392       MBBI != E; ++MBBI) {
393    MachineBasicBlock *MBB = MBBI;
394    RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size();
395
396    if (MBB->empty())
397      continue;
398
399    // Track the index of the current machine instr.
400    SlotIndex MIIndex = getMBBStartIdx(MBB);
401    DEBUG(dbgs() << "BB#" << MBB->getNumber()
402          << ":\t\t# derived from " << MBB->getName() << "\n");
403
404    // Skip over empty initial indices.
405    if (getInstructionFromIndex(MIIndex) == 0)
406      MIIndex = Indexes->getNextNonNullIndex(MIIndex);
407
408    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
409         MI != miEnd; ++MI) {
410      DEBUG(dbgs() << MIIndex << "\t" << *MI);
411      if (MI->isDebugValue())
412        continue;
413      assert(Indexes->getInstructionFromIndex(MIIndex) == MI &&
414             "Lost SlotIndex synchronization");
415
416      // Handle defs.
417      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
418        MachineOperand &MO = MI->getOperand(i);
419
420        // Collect register masks.
421        if (MO.isRegMask()) {
422          RegMaskSlots.push_back(MIIndex.getRegSlot());
423          RegMaskBits.push_back(MO.getRegMask());
424          continue;
425        }
426
427        if (!MO.isReg() || !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
428          continue;
429
430        // handle register defs - build intervals
431        if (MO.isDef())
432          handleRegisterDef(MBB, MI, MIIndex, MO, i);
433        else if (MO.isUndef())
434          UndefUses.push_back(MO.getReg());
435      }
436
437      // Move to the next instr slot.
438      MIIndex = Indexes->getNextNonNullIndex(MIIndex);
439    }
440
441    // Compute the number of register mask instructions in this block.
442    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
443    RMB.second = RegMaskSlots.size() - RMB.first;;
444  }
445
446  // Create empty intervals for registers defined by implicit_def's (except
447  // for those implicit_def that define values which are liveout of their
448  // blocks.
449  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
450    unsigned UndefReg = UndefUses[i];
451    (void)getOrCreateInterval(UndefReg);
452  }
453}
454
455LiveInterval* LiveIntervals::createInterval(unsigned reg) {
456  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
457  return new LiveInterval(reg, Weight);
458}
459
460
/// computeVirtRegInterval - Compute the live interval of a virtual register,
/// based on defs and uses.
void LiveIntervals::computeVirtRegInterval(LiveInterval *LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI->empty() && "Should only compute empty intervals.");
  // Reset the calculator for this function, then seed dead defs and extend
  // them to cover all reads of the register.
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->createDeadDefs(LI);
  LRCalc->extendToUses(LI);
}
470
471void LiveIntervals::computeVirtRegs() {
472  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
473    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
474    if (MRI->reg_nodbg_empty(Reg))
475      continue;
476    LiveInterval *LI = createInterval(Reg);
477    VirtRegIntervals[Reg] = LI;
478    computeVirtRegInterval(LI);
479  }
480}
481
482void LiveIntervals::computeRegMasks() {
483  RegMaskBlocks.resize(MF->getNumBlockIDs());
484
485  // Find all instructions with regmask operands.
486  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
487       MBBI != E; ++MBBI) {
488    MachineBasicBlock *MBB = MBBI;
489    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
490    RMB.first = RegMaskSlots.size();
491    for (MachineBasicBlock::iterator MI = MBB->begin(), ME = MBB->end();
492         MI != ME; ++MI)
493      for (MIOperands MO(MI); MO.isValid(); ++MO) {
494        if (!MO->isRegMask())
495          continue;
496          RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
497          RegMaskBits.push_back(MO->getRegMask());
498      }
499    // Compute the number of register mask instructions in this block.
500    RMB.second = RegMaskSlots.size() - RMB.first;;
501  }
502}
503
504//===----------------------------------------------------------------------===//
505//                           Register Unit Liveness
506//===----------------------------------------------------------------------===//
507//
508// Fixed interference typically comes from ABI boundaries: Function arguments
509// and return values are passed in fixed registers, and so are exception
510// pointers entering landing pads. Certain instructions require values to be
511// present in specific registers. That is also represented through fixed
512// interference.
513//
514
515/// computeRegUnitInterval - Compute the live interval of a register unit, based
516/// on the uses and defs of aliasing registers.  The interval should be empty,
517/// or contain only dead phi-defs from ABI blocks.
518void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) {
519  unsigned Unit = LI->reg;
520
521  assert(LRCalc && "LRCalc not initialized.");
522  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
523
524  // The physregs aliasing Unit are the roots and their super-registers.
525  // Create all values as dead defs before extending to uses. Note that roots
526  // may share super-registers. That's OK because createDeadDefs() is
527  // idempotent. It is very rare for a register unit to have multiple roots, so
528  // uniquing super-registers is probably not worthwhile.
529  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
530    unsigned Root = *Roots;
531    if (!MRI->reg_empty(Root))
532      LRCalc->createDeadDefs(LI, Root);
533    for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
534      if (!MRI->reg_empty(*Supers))
535        LRCalc->createDeadDefs(LI, *Supers);
536    }
537  }
538
539  // Now extend LI to reach all uses.
540  // Ignore uses of reserved registers. We only track defs of those.
541  for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
542    unsigned Root = *Roots;
543    if (!isReserved(Root) && !MRI->reg_empty(Root))
544      LRCalc->extendToUses(LI, Root);
545    for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
546      unsigned Reg = *Supers;
547      if (!isReserved(Reg) && !MRI->reg_empty(Reg))
548        LRCalc->extendToUses(LI, Reg);
549    }
550  }
551}
552
553
/// computeLiveInRegUnits - Precompute the live ranges of any register units
/// that are live-in to an ABI block somewhere. Register values can appear
/// without a corresponding def when entering the entry block or a landing pad.
///
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitIntervals.resize(TRI->getNumRegUnits());
  DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the intervals allocated.
  SmallVector<LiveInterval*, 8> NewIntvs;

  // Check all basic blocks for live-ins.
  for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
       MFI != MFE; ++MFI) {
    const MachineBasicBlock *MBB = MFI;

    // We only care about ABI blocks: Entry + landing pads.
    if ((MFI != MF->begin() && !MBB->isLandingPad()) || MBB->livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
    DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
    for (MachineBasicBlock::livein_iterator LII = MBB->livein_begin(),
         LIE = MBB->livein_end(); LII != LIE; ++LII) {
      for (MCRegUnitIterator Units(*LII, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        // Lazily create the unit's interval on first sight.
        LiveInterval *Intv = RegUnitIntervals[Unit];
        if (!Intv) {
          Intv = RegUnitIntervals[Unit] = new LiveInterval(Unit, HUGE_VALF);
          NewIntvs.push_back(Intv);
        }
        VNInfo *VNI = Intv->createDeadDef(Begin, getVNInfoAllocator());
        // The cast silences the unused-variable warning in release builds
        // where the DEBUG statement below compiles away.
        (void)VNI;
        DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    DEBUG(dbgs() << '\n');
  }
  DEBUG(dbgs() << "Created " << NewIntvs.size() << " new intervals.\n");

  // Compute the 'normal' part of the intervals.
  for (unsigned i = 0, e = NewIntvs.size(); i != e; ++i)
    computeRegUnitInterval(NewIntvs[i]);
}
599
600
/// shrinkToUses - After removing some uses of a register, shrink its live
/// range to just the remaining uses. This method does not compute reaching
/// defs for new uses, and it doesn't remove dead defs.
/// Returns true if dead PHI values were removed, which may have split the
/// interval into separate connected components.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  SmallVector<std::pair<SlotIndex, VNInfo*>, 16> WorkList;

  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<MachineBasicBlock*, 16> LiveOut;

  // Visit all instructions reading li->reg.
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(li->reg);
       MachineInstr *UseMI = I.skipInstruction();) {
    if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveRangeQuery LRQ(*li, Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << *UseMI
                   << "Warning: Instr claims to read non-existent value in "
                    << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live interval with only minimal live segments per def.
  LiveInterval NewLI(li->reg, 0);
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI));
  }

  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    // Idx may be at a block boundary; step back one slot to find the block
    // containing the use itself.
    const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) {
      (void)ExtVNI;
      assert(ExtVNI == VNI && "Unexpected existing value number");
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        if (!LiveOut.insert(*PI))
          continue;
        SlotIndex Stop = getMBBEndIdx(*PI);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = li->getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    NewLI.addRange(LiveRange(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
      if (!LiveOut.insert(*PI))
        continue;
      SlotIndex Stop = getMBBEndIdx(*PI);
      assert(li->getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }

  // Handle dead values.
  bool CanSeparate = false;
  for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end();
       I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def);
    assert(LII != NewLI.end() && "Missing live range for PHI");
    // A range extending past the dead slot means the value is still used.
    if (LII->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      NewLI.removeRange(*LII);
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      CanSeparate = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(VNI->def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(li->reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }

  // Move the trimmed ranges back.
  li->ranges.swap(NewLI.ranges);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
731
732
733//===----------------------------------------------------------------------===//
734// Register allocator hooks.
735//
736
737void LiveIntervals::addKillFlags() {
738  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
739    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
740    if (MRI->reg_nodbg_empty(Reg))
741      continue;
742    LiveInterval *LI = &getInterval(Reg);
743
744    // Every instruction that kills Reg corresponds to a live range end point.
745    for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
746         ++RI) {
747      // A block index indicates an MBB edge.
748      if (RI->end.isBlock())
749        continue;
750      MachineInstr *MI = getInstructionFromIndex(RI->end);
751      if (!MI)
752        continue;
753      MI->addRegisterKilled(Reg, NULL);
754    }
755  }
756}
757
758MachineBasicBlock*
759LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
760  // A local live range must be fully contained inside the block, meaning it is
761  // defined and killed at instructions, not at block boundaries. It is not
762  // live in or or out of any block.
763  //
764  // It is technically possible to have a PHI-defined live range identical to a
765  // single block, but we are going to return false in that case.
766
767  SlotIndex Start = LI.beginIndex();
768  if (Start.isBlock())
769    return NULL;
770
771  SlotIndex Stop = LI.endIndex();
772  if (Stop.isBlock())
773    return NULL;
774
775  // getMBBFromIndex doesn't need to search the MBB table when both indexes
776  // belong to proper instructions.
777  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
778  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
779  return MBB1 == MBB2 ? MBB1 : NULL;
780}
781
782bool
783LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
784  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
785       I != E; ++I) {
786    const VNInfo *PHI = *I;
787    if (PHI->isUnused() || !PHI->isPHIDef())
788      continue;
789    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
790    // Conservatively return true instead of scanning huge predecessor lists.
791    if (PHIMBB->pred_size() > 100)
792      return true;
793    for (MachineBasicBlock::const_pred_iterator
794         PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
795      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
796        return true;
797  }
798  return false;
799}
800
801float
802LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
803  // Limit the loop depth ridiculousness.
804  if (loopDepth > 200)
805    loopDepth = 200;
806
807  // The loop depth is used to roughly estimate the number of times the
808  // instruction is executed. Something like 10^d is simple, but will quickly
809  // overflow a float. This expression behaves like 10^d for small d, but is
810  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
811  // headroom before overflow.
812  // By the way, powf() might be unavailable here. For consistency,
813  // We may take pow(double,double).
814  float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth);
815
816  return (isDef + isUse) * lc;
817}
818
819LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
820                                                  MachineInstr* startInst) {
821  LiveInterval& Interval = getOrCreateInterval(reg);
822  VNInfo* VN = Interval.getNextValue(
823    SlotIndex(getInstructionIndex(startInst).getRegSlot()),
824    getVNInfoAllocator());
825  LiveRange LR(
826     SlotIndex(getInstructionIndex(startInst).getRegSlot()),
827     getMBBEndIdx(startInst->getParent()), VN);
828  Interval.addRange(LR);
829
830  return LR;
831}
832
833
834//===----------------------------------------------------------------------===//
835//                          Register mask functions
836//===----------------------------------------------------------------------===//
837
/// checkRegMaskInterference - Check whether LI overlaps any of the recorded
/// register mask slots (call sites). Returns true on overlap; in that case
/// UsableRegs is filled with the registers not clobbered by any overlapping
/// mask.
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
    std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  // Walk LI's segments and the sorted mask slots in lockstep, like a merge
  // of two sorted sequences.
  for (;;) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}
892
//===----------------------------------------------------------------------===//
//                              HMEditor class.
//===----------------------------------------------------------------------===//
896
// HMEditor is a toolkit used by handleMove to trim or extend live intervals
// when an instruction moves within its basic block, or is folded into a
// bundle.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  // The slot index the edited instruction is moving to (or the bundle
  // start's index when moving into a bundle).
  SlotIndex NewIdx;

  // A live range paired with the interval that owns it.
  typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
  typedef DenseSet<IntRangePair> RangeSet;

  // Per-register bookkeeping for the ranges connected to one bundle.
  struct RegRanges {
    LiveRange* Use;  // Range entering the bundle that the bundle reads.
    LiveRange* EC;   // Internal, non-dead range (presumably an
                     // early-clobber def -- TODO confirm).
    LiveRange* Dead; // Internal range ending at a dead slot (dead def).
    LiveRange* Def;  // Range defined in the bundle that exits it.
    RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
  };
  typedef DenseMap<unsigned, RegRanges> BundleRanges;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI, SlotIndex NewIdx)
    : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}

  // Update intervals for all operands of MI from OldIdx to NewIdx.
  // This assumes that MI used to be at OldIdx, and now resides at
  // NewIdx.
  void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
    assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");

    // Collect the operands, classified by how their ranges relate to OldIdx.
    RangeSet Entering, Internal, Exiting;
    bool hasRegMaskOp = false;
    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);

    // To keep the LiveRanges valid within an interval, move the ranges
    // closest to the destination first. This prevents ranges from
    // overlapping, so that APIs like removeRange still work.
    if (NewIdx < OldIdx) {
      moveAllEnteringFrom(OldIdx, Entering);
      moveAllInternalFrom(OldIdx, Internal);
      moveAllExitingFrom(OldIdx, Exiting);
    }
    else {
      moveAllExitingFrom(OldIdx, Exiting);
      moveAllInternalFrom(OldIdx, Internal);
      moveAllEnteringFrom(OldIdx, Entering);
    }

    if (hasRegMaskOp)
      updateRegMaskSlots(OldIdx);

#ifndef NDEBUG
    // Verify that every touched interval still has sorted, non-empty
    // segments.
    LIValidator validator;
    validator = std::for_each(Entering.begin(), Entering.end(), validator);
    validator = std::for_each(Internal.begin(), Internal.end(), validator);
    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
    assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
#endif

  }

  // Update intervals for all operands of MI to refer to BundleStart's
  // SlotIndex.
  void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
    if (MI == BundleStart)
      return; // Bundling instr with itself - nothing to do.

    SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
    assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
           "SlotIndex <-> Instruction mapping broken for MI");

    // Collect all ranges already in the bundle.
    MachineBasicBlock::instr_iterator BII(BundleStart);
    RangeSet Entering, Internal, Exiting;
    bool hasRegMaskOp = false;
    collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
    assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
    for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
      if (&*BII == MI)
        continue;
      collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
      assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
    }

    BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);

    // Now collect only MI's own ranges, relative to its old position, and
    // merge them into the bundle's ranges below.
    Entering.clear();
    Internal.clear();
    Exiting.clear();
    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
    assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");

    DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
    DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
    DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");

    moveAllEnteringFromInto(OldIdx, Entering, BR);
    moveAllInternalFromInto(OldIdx, Internal, BR);
    moveAllExitingFromInto(OldIdx, Exiting, BR);


#ifndef NDEBUG
    // Verify that every touched interval still has sorted, non-empty
    // segments.
    LIValidator validator;
    validator = std::for_each(Entering.begin(), Entering.end(), validator);
    validator = std::for_each(Internal.begin(), Internal.end(), validator);
    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
    assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
#endif
  }

private:

#ifndef NDEBUG
  // Debug-build functor passed to std::for_each: records any visited
  // interval whose segments are unsorted or empty.
  class LIValidator {
  private:
    DenseSet<const LiveInterval*> Checked, Bogus;
  public:
    void operator()(const IntRangePair& P) {
      const LiveInterval* LI = P.first;
      // Only examine each interval once.
      if (Checked.count(LI))
        return;
      Checked.insert(LI);
      if (LI->empty())
        return;
      // Segments must be in increasing order and each must be non-empty.
      SlotIndex LastEnd = LI->begin()->start;
      for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
           LRI != LRE; ++LRI) {
        const LiveRange& LR = *LRI;
        if (LastEnd > LR.start || LR.start >= LR.end)
          Bogus.insert(LI);
        LastEnd = LR.end;
      }
    }

    // True if no interval seen so far was malformed.
    bool rangesOk() const {
      return Bogus.empty();
    }
  };
#endif

  // Collect IntRangePairs for all operands of MI that may need fixing.
  // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
  // maps).
  void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
                     RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
    hasRegMaskOp = false;
    for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
                                    MOE = MI->operands_end();
         MOI != MOE; ++MOI) {
      const MachineOperand& MO = *MOI;

      // Remember regmask operands; the caller must fix RegMaskSlots.
      if (MO.isRegMask()) {
        hasRegMaskOp = true;
        continue;
      }

      if (!MO.isReg() || MO.getReg() == 0)
        continue;

      unsigned Reg = MO.getReg();

      // TODO: Currently we're skipping uses that are reserved or have no
      // interval, but we're not updating their kills. This should be
      // fixed.
      if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg))
        continue;

      // Collect ranges for register units. These live ranges are computed on
      // demand, so just skip any that haven't been computed yet.
      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
        for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
          if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
            collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx);
      } else {
        // Collect ranges for individual virtual registers.
        collectRanges(MO, &LIS.getInterval(Reg),
                      Entering, Internal, Exiting, OldIdx);
      }
    }
  }

  // Classify the range(s) of LI touched by operand MO at OldIdx:
  //   Entering - read by MO, so live into the instruction.
  //   Internal - defined here and contained within OldIdx's slots.
  //   Exiting  - defined here and live past the instruction.
  void collectRanges(const MachineOperand &MO, LiveInterval *LI,
                     RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting,
                     SlotIndex OldIdx) {
    if (MO.readsReg()) {
      LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
      if (LR != 0)
        Entering.insert(std::make_pair(LI, LR));
    }
    if (MO.isDef()) {
      LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
      assert(LR != 0 && "No live range for def?");
      if (LR->end > OldIdx.getDeadSlot())
        Exiting.insert(std::make_pair(LI, LR));
      else
        Internal.insert(std::make_pair(LI, LR));
    }
  }

  // Build the per-register BundleRanges table from the classified range
  // sets of the instructions already in the bundle.
  BundleRanges createBundleRanges(RangeSet& Entering,
                                  RangeSet& Internal,
                                  RangeSet& Exiting) {
    BundleRanges BR;

    for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
         EI != EE; ++EI) {
      LiveInterval* LI = EI->first;
      LiveRange* LR = EI->second;
      BR[LI->reg].Use = LR;
    }

    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II) {
      LiveInterval* LI = II->first;
      LiveRange* LR = II->second;
      // An internal range ending at a dead slot is a dead def; otherwise
      // it is recorded in the EC slot.
      if (LR->end.isDead()) {
        BR[LI->reg].Dead = LR;
      } else {
        BR[LI->reg].EC = LR;
      }
    }

    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI) {
      LiveInterval* LI = EI->first;
      LiveRange* LR = EI->second;
      BR[LI->reg].Def = LR;
    }

    return BR;
  }

  // Transfer reg's kill flag from the instruction at OldIdx to the one at
  // newKillIdx, if the old instruction carried one.
  void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
    MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
    if (!OldKillMI->killsRegister(reg))
      return; // Bail out if we don't have kill flags on the old register.
    MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
    // NOTE(review): this assert is redundant -- the early return above
    // already guarantees it.
    assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
    assert(!NewKillMI->killsRegister(reg) &&
           "New kill instr is already a kill.");
    OldKillMI->clearRegisterKills(reg, &TRI);
    NewKillMI->addRegisterKilled(reg, &TRI);
  }

  // Rewrite the RegMaskSlots entry for OldIdx to NewIdx. The array stays
  // sorted only if the regmask instruction was not moved across another
  // regmask (asserted below).
  void updateRegMaskSlots(SlotIndex OldIdx) {
    SmallVectorImpl<SlotIndex>::iterator RI =
      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                       OldIdx);
    assert(*RI == OldIdx && "No RegMask at OldIdx.");
    *RI = NewIdx;
    assert(*prior(RI) < *RI && *RI < *next(RI) &&
           "RegSlots out of order. Did you move one call across another?");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
    // Default to NewIdx so a register with no use in the window is treated
    // as last used by the moved instruction itself.
    SlotIndex LastUse = NewIdx;
    for (MachineRegisterInfo::use_nodbg_iterator
           UI = MRI.use_nodbg_begin(Reg),
           UE = MRI.use_nodbg_end();
         UI != UE; UI.skipInstruction()) {
      const MachineInstr* MI = &*UI;
      SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
      if (InstSlot > LastUse && InstSlot < OldIdx)
        LastUse = InstSlot;
    }
    return LastUse;
  }

  // Handle an entering range when the instruction moved up
  // (NewIdx < OldIdx): if the range ended at the old position, trim it back
  // to the last remaining use.
  void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    bool LiveThrough = LR->end > OldIdx.getRegSlot();
    if (LiveThrough)
      return;
    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
    if (LastUse != NewIdx)
      moveKillFlags(LI->reg, NewIdx, LastUse);
    LR->end = LastUse.getRegSlot();
  }

  // Handle an entering range when the instruction moved down
  // (NewIdx > OldIdx): extend the range if the new position is past its end.
  void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    // Extend the LiveRange if NewIdx is past the end.
    if (NewIdx > LR->end) {
      // Move kill flags if OldIdx was not originally the end
      // (otherwise LR->end points to an invalid slot).
      if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
        assert(LR->end > OldIdx && "LiveRange does not cover original slot");
        moveKillFlags(LI->reg, LR->end, NewIdx);
      }
      LR->end = NewIdx.getRegSlot();
    }
  }

  // Dispatch each entering range to the up- or down-move handler.
  void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
    bool GoingUp = NewIdx < OldIdx;

    if (GoingUp) {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringUpFrom(OldIdx, *EI);
    } else {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringDownFrom(OldIdx, *EI);
    }
  }

  // Relocate a range that is entirely contained in OldIdx's slots so that
  // it sits at NewIdx instead.
  void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
           LR->end <= OldIdx.getDeadSlot() &&
           "Range should be internal to OldIdx.");
    // Build the relocated copy first, then remove/add, so the interval
    // never holds two overlapping ranges.
    LiveRange Tmp(*LR);
    Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
    Tmp.valno->def = Tmp.start;
    Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
    LI->removeRange(*LR);
    LI->addRange(Tmp);
  }

  void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II)
      moveInternalFrom(OldIdx, *II);
  }

  // Move the start (def point) of a range that begins at OldIdx and
  // extends beyond it.
  void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
    LiveRange* LR = P.second;
    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
           "Range should start in OldIdx.");
    assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
    SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
    LR->start = NewStart;
    LR->valno->def = NewStart;
  }

  void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI)
      moveExitingFrom(OldIdx, *EI);
  }

  // Merge an entering range into a bundle when the instruction moved up
  // (NewIdx < OldIdx).
  void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
                              BundleRanges& BR) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    bool LiveThrough = LR->end > OldIdx.getRegSlot();
    if (LiveThrough) {
      assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
             "Def in bundle should be def range.");
      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
             "If bundle has use for this reg it should be LR.");
      BR[LI->reg].Use = LR;
      return;
    }

    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
    moveKillFlags(LI->reg, OldIdx, LastUse);

    if (LR->start < NewIdx) {
      // Becoming a new entering range.
      assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
             "Bundle shouldn't be re-defining reg mid-range.");
      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
             "Bundle shouldn't have different use range for same reg.");
      LR->end = LastUse.getRegSlot();
      BR[LI->reg].Use = LR;
    } else {
      // Becoming a new Dead-def.
      assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
             "Live range starting at unexpected slot.");
      assert(BR[LI->reg].Def == LR && "Reg should have def range.");
      assert(BR[LI->reg].Dead == 0 &&
               "Can't have def and dead def of same reg in a bundle.");
      LR->end = LastUse.getDeadSlot();
      BR[LI->reg].Dead = BR[LI->reg].Def;
      BR[LI->reg].Def = 0;
    }
  }

  // Merge an entering range into a bundle when the instruction moved down
  // (NewIdx > OldIdx).
  void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
                                BundleRanges& BR) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;
    if (NewIdx > LR->end) {
      // Range extended to bundle. Add to bundle uses.
      // Note: Currently adds kill flags to bundle start.
      assert(BR[LI->reg].Use == 0 &&
             "Bundle already has use range for reg.");
      moveKillFlags(LI->reg, LR->end, NewIdx);
      LR->end = NewIdx.getRegSlot();
      BR[LI->reg].Use = LR;
    } else {
      assert(BR[LI->reg].Use != 0 &&
             "Bundle should already have a use range for reg.");
    }
  }

  // Dispatch each entering range to the up- or down-merge handler.
  void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
                               BundleRanges& BR) {
    bool GoingUp = NewIdx < OldIdx;

    if (GoingUp) {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringUpFromInto(OldIdx, *EI, BR);
    } else {
      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
           EI != EE; ++EI)
        moveEnteringDownFromInto(OldIdx, *EI, BR);
    }
  }

  // Merging an internal range into a bundle is not implemented yet.
  void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
                            BundleRanges& BR) {
    // TODO: Sane rules for moving ranges into bundles.
  }

  void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
                               BundleRanges& BR) {
    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
         II != IE; ++II)
      moveInternalFromInto(OldIdx, *II, BR);
  }

  // Merge an exiting (def) range into a bundle: either it becomes the
  // bundle's def range, or it collapses to a dead def inside the bundle.
  void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
                           BundleRanges& BR) {
    LiveInterval* LI = P.first;
    LiveRange* LR = P.second;

    assert(LR->start.isRegister() &&
           "Don't know how to merge exiting ECs into bundles yet.");

    if (LR->end > NewIdx.getDeadSlot()) {
      // This range is becoming an exiting range on the bundle.
      // If there was an old dead-def of this reg, delete it.
      if (BR[LI->reg].Dead != 0) {
        LI->removeRange(*BR[LI->reg].Dead);
        BR[LI->reg].Dead = 0;
      }
      assert(BR[LI->reg].Def == 0 &&
             "Can't have two defs for the same variable exiting a bundle.");
      LR->start = NewIdx.getRegSlot();
      LR->valno->def = LR->start;
      BR[LI->reg].Def = LR;
    } else {
      // This range is becoming internal to the bundle.
      assert(LR->end == NewIdx.getRegSlot() &&
             "Can't bundle def whose kill is before the bundle");
      if (BR[LI->reg].Dead || BR[LI->reg].Def) {
        // Already have a def for this. Just delete range.
        LI->removeRange(*LR);
      } else {
        // Make range dead, record.
        LR->end = NewIdx.getDeadSlot();
        BR[LI->reg].Dead = LR;
        assert(BR[LI->reg].Use == LR &&
               "Range becoming dead should currently be use.");
      }
      // In both cases the range is no longer a use on the bundle.
      BR[LI->reg].Use = 0;
    }
  }

  void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
                              BundleRanges& BR) {
    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
         EI != EE; ++EI)
      moveExitingFromInto(OldIdx, *EI, BR);
  }

};
1375
1376void LiveIntervals::handleMove(MachineInstr* MI) {
1377  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1378  Indexes->removeMachineInstrFromMaps(MI);
1379  SlotIndex NewIndex = MI->isInsideBundle() ?
1380                        Indexes->getInstructionIndex(MI) :
1381                        Indexes->insertMachineInstrInMaps(MI);
1382  assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
1383         OldIndex < getMBBEndIdx(MI->getParent()) &&
1384         "Cannot handle moves across basic block boundaries.");
1385  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
1386
1387  HMEditor HME(*this, *MRI, *TRI, NewIndex);
1388  HME.moveAllRangesFrom(MI, OldIndex);
1389}
1390
1391void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
1392                                         MachineInstr* BundleStart) {
1393  SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
1394  HMEditor HME(*this, *MRI, *TRI, NewIndex);
1395  HME.moveAllRangesInto(MI, BundleStart);
1396}
1397