// LiveIntervalAnalysis.cpp, revision 193323
1193323Sed//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2193323Sed//
3193323Sed//                     The LLVM Compiler Infrastructure
4193323Sed//
5193323Sed// This file is distributed under the University of Illinois Open Source
6193323Sed// License. See LICENSE.TXT for details.
7193323Sed//
8193323Sed//===----------------------------------------------------------------------===//
9193323Sed//
10193323Sed// This file implements the LiveInterval analysis pass which is used
11193323Sed// by the Linear Scan Register allocator. This pass linearizes the
12193323Sed// basic blocks of the function in DFS order and uses the
13193323Sed// LiveVariables pass to conservatively compute live intervals for
14193323Sed// each virtual and physical register.
15193323Sed//
16193323Sed//===----------------------------------------------------------------------===//
17193323Sed
18193323Sed#define DEBUG_TYPE "liveintervals"
19193323Sed#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20193323Sed#include "VirtRegMap.h"
21193323Sed#include "llvm/Value.h"
22193323Sed#include "llvm/Analysis/AliasAnalysis.h"
23193323Sed#include "llvm/CodeGen/LiveVariables.h"
24193323Sed#include "llvm/CodeGen/MachineFrameInfo.h"
25193323Sed#include "llvm/CodeGen/MachineInstr.h"
26193323Sed#include "llvm/CodeGen/MachineLoopInfo.h"
27193323Sed#include "llvm/CodeGen/MachineRegisterInfo.h"
28193323Sed#include "llvm/CodeGen/Passes.h"
29193323Sed#include "llvm/CodeGen/PseudoSourceValue.h"
30193323Sed#include "llvm/Target/TargetRegisterInfo.h"
31193323Sed#include "llvm/Target/TargetInstrInfo.h"
32193323Sed#include "llvm/Target/TargetMachine.h"
33193323Sed#include "llvm/Target/TargetOptions.h"
34193323Sed#include "llvm/Support/CommandLine.h"
35193323Sed#include "llvm/Support/Debug.h"
36193323Sed#include "llvm/ADT/Statistic.h"
37193323Sed#include "llvm/ADT/STLExtras.h"
38193323Sed#include <algorithm>
39193323Sed#include <limits>
40193323Sed#include <cmath>
41193323Sedusing namespace llvm;
42193323Sed
// Hidden options for help debugging.
// Globally disable rematerialization of values during allocation.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Allow live intervals to be split at basic-block boundaries (on by default).
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
// Upper bound on the number of interval splits; -1 means no limit.
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

// Enable the more aggressive rematerialization heuristics.
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

// Use the fast spilling path instead of the default one.
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification and registration with the PassManager.
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63193323Sed
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  // Declare the analyses this pass depends on and those it keeps valid.
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  // Classic PHI elimination is only required (and preserved) when the
  // strong PHI elimination pass is not enabled.
  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  // Two-address lowering must have run before intervals are computed.
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
80193323Sed
81193323Sedvoid LiveIntervals::releaseMemory() {
82193323Sed  // Free the live intervals themselves.
83193323Sed  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
84193323Sed       E = r2iMap_.end(); I != E; ++I)
85193323Sed    delete I->second;
86193323Sed
87193323Sed  MBB2IdxMap.clear();
88193323Sed  Idx2MBBMap.clear();
89193323Sed  mi2iMap_.clear();
90193323Sed  i2miMap_.clear();
91193323Sed  r2iMap_.clear();
92193323Sed  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
93193323Sed  VNInfoAllocator.Reset();
94193323Sed  while (!ClonedMIs.empty()) {
95193323Sed    MachineInstr *MI = ClonedMIs.back();
96193323Sed    ClonedMIs.pop_back();
97193323Sed    mf_->DeleteMachineInstr(MI);
98193323Sed  }
99193323Sed}
100193323Sed
// computeNumbering - Assign a slot index to every MachineInstr in the
// function and rebuild the index <-> instruction / index <-> MBB maps.
// If an old numbering exists, every live range, VNInfo def, and kill
// index is remapped from the old numbering to the new one.
void LiveIntervals::computeNumbering() {
  // Keep the previous maps so existing intervals can be translated below.
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      // NOTE(review): dead store — 'inserted' is never read again;
      // presumably here to quiet a set-but-unused warning in NDEBUG
      // builds. TODO confirm; '(void)inserted;' would be clearer.
      inserted = true;
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // If an old numbering existed, translate every interval to the new one.
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // A LOAD offset means the range began at a block boundary; map it
          // to the start of the (old) containing block in the new numbering.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset  = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          // Skip over any slots whose instruction has been erased; fall back
          // to the next surviving instruction (or the function end).
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            // Skip erased instructions, as for range ends above.
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
246193323Sed
247193323Sedvoid LiveIntervals::scaleNumbering(int factor) {
248193323Sed  // Need to
249193323Sed  //  * scale MBB begin and end points
250193323Sed  //  * scale all ranges.
251193323Sed  //  * Update VNI structures.
252193323Sed  //  * Scale instruction numberings
253193323Sed
254193323Sed  // Scale the MBB indices.
255193323Sed  Idx2MBBMap.clear();
256193323Sed  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
257193323Sed       MBB != MBBE; ++MBB) {
258193323Sed    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
259193323Sed    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
260193323Sed    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
261193323Sed    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
262193323Sed  }
263193323Sed  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
264193323Sed
265193323Sed  // Scale the intervals.
266193323Sed  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
267193323Sed    LI->second->scaleNumbering(factor);
268193323Sed  }
269193323Sed
270193323Sed  // Scale MachineInstrs.
271193323Sed  Mi2IndexMap oldmi2iMap = mi2iMap_;
272193323Sed  unsigned highestSlot = 0;
273193323Sed  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
274193323Sed       MI != ME; ++MI) {
275193323Sed    unsigned newSlot = InstrSlots::scale(MI->second, factor);
276193323Sed    mi2iMap_[MI->first] = newSlot;
277193323Sed    highestSlot = std::max(highestSlot, newSlot);
278193323Sed  }
279193323Sed
280193323Sed  i2miMap_.clear();
281193323Sed  i2miMap_.resize(highestSlot + 1);
282193323Sed  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
283193323Sed       MI != ME; ++MI) {
284193323Sed    i2miMap_[MI->second] = MI->first;
285193323Sed  }
286193323Sed
287193323Sed}
288193323Sed
289193323Sed
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the function plus the target and analysis objects that the rest
  // of the pass queries repeatedly.
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number every instruction, then build live intervals on that numbering.
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
310193323Sed
311193323Sed/// print - Implement the dump method.
312193323Sedvoid LiveIntervals::print(std::ostream &O, const Module* ) const {
313193323Sed  O << "********** INTERVALS **********\n";
314193323Sed  for (const_iterator I = begin(), E = end(); I != E; ++I) {
315193323Sed    I->second->print(O, tri_);
316193323Sed    O << "\n";
317193323Sed  }
318193323Sed
319193323Sed  O << "********** MACHINEINSTRS **********\n";
320193323Sed  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
321193323Sed       mbbi != mbbe; ++mbbi) {
322193323Sed    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
323193323Sed    for (MachineBasicBlock::iterator mii = mbbi->begin(),
324193323Sed           mie = mbbi->end(); mii != mie; ++mii) {
325193323Sed      O << getInstructionIndex(mii) << '\t' << *mii;
326193323Sed    }
327193323Sed  }
328193323Sed}
329193323Sed
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction slot covered by the interval's ranges and look
  // for an operand that (after mapping vregs through the VirtRegMap)
  // overlaps 'reg'.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      // Copies to or from li.reg itself do not count as a conflict.
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          // Translate assigned virtual registers to their physical home;
          // unassigned vregs cannot conflict yet.
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
369193323Sed
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check use as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  // Scan every instruction slot covered by the interval for an operand
  // that is a sub-register of 'Reg'.  Uses are only considered when
  // CheckUse is set; instructions in JoinedCopies are ignored.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
409193323Sed
410193323Sed
411193323Sedvoid LiveIntervals::printRegName(unsigned reg) const {
412193323Sed  if (TargetRegisterInfo::isPhysicalRegister(reg))
413193323Sed    cerr << tri_->getName(reg);
414193323Sed  else
415193323Sed    cerr << "%reg" << reg;
416193323Sed}
417193323Sed
/// handleVirtualRegisterDef - Extend the live interval of the virtual
/// register defined by operand MOIdx (MO) of instruction mi, which has
/// index MIIdx.  Handles the first definition, two-address redefinitions,
/// and redefinitions introduced by PHI elimination.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // IMPLICIT_DEF does not produce a real value; no range is recorded.
  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    // Record the defining copy instruction (if any) on the value number,
    // so coalescing can find it later.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->redefByEC = true;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        // (~0 is the sentinel def index for PHI-defined values.)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
620193323Sed
621193323Sedvoid LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
622193323Sed                                              MachineBasicBlock::iterator mi,
623193323Sed                                              unsigned MIIdx,
624193323Sed                                              MachineOperand& MO,
625193323Sed                                              LiveInterval &interval,
626193323Sed                                              MachineInstr *CopyMI) {
627193323Sed  // A physical register cannot be live across basic block, so its
628193323Sed  // lifetime must end somewhere in its defining basic block.
629193323Sed  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
630193323Sed
631193323Sed  unsigned baseIndex = MIIdx;
632193323Sed  unsigned start = getDefIndex(baseIndex);
633193323Sed  // Earlyclobbers move back one.
634193323Sed  if (MO.isEarlyClobber())
635193323Sed    start = getUseIndex(MIIdx);
636193323Sed  unsigned end = start;
637193323Sed
638193323Sed  // If it is not used after definition, it is considered dead at
639193323Sed  // the instruction defining it. Hence its interval is:
640193323Sed  // [defSlot(def), defSlot(def)+1)
641193323Sed  if (MO.isDead()) {
642193323Sed    DOUT << " dead";
643193323Sed    end = start + 1;
644193323Sed    goto exit;
645193323Sed  }
646193323Sed
647193323Sed  // If it is not dead on definition, it must be killed by a
648193323Sed  // subsequent instruction. Hence its interval is:
649193323Sed  // [defSlot(def), useSlot(kill)+1)
650193323Sed  baseIndex += InstrSlots::NUM;
651193323Sed  while (++mi != MBB->end()) {
652193323Sed    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
653193323Sed           getInstructionFromIndex(baseIndex) == 0)
654193323Sed      baseIndex += InstrSlots::NUM;
655193323Sed    if (mi->killsRegister(interval.reg, tri_)) {
656193323Sed      DOUT << " killed";
657193323Sed      end = getUseIndex(baseIndex) + 1;
658193323Sed      goto exit;
659193323Sed    } else {
660193323Sed      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
661193323Sed      if (DefIdx != -1) {
662193323Sed        if (mi->isRegTiedToUseOperand(DefIdx)) {
663193323Sed          // Two-address instruction.
664193323Sed          end = getDefIndex(baseIndex);
665193323Sed          if (mi->getOperand(DefIdx).isEarlyClobber())
666193323Sed            end = getUseIndex(baseIndex);
667193323Sed        } else {
668193323Sed          // Another instruction redefines the register before it is ever read.
669193323Sed          // Then the register is essentially dead at the instruction that defines
670193323Sed          // it. Hence its interval is:
671193323Sed          // [defSlot(def), defSlot(def)+1)
672193323Sed          DOUT << " dead";
673193323Sed          end = start + 1;
674193323Sed        }
675193323Sed        goto exit;
676193323Sed      }
677193323Sed    }
678193323Sed
679193323Sed    baseIndex += InstrSlots::NUM;
680193323Sed  }
681193323Sed
682193323Sed  // The only case we should have a dead physreg here without a killing or
683193323Sed  // instruction where we know it's dead is if it is live-in to the function
684193323Sed  // and never used. Another possible case is the implicit use of the
685193323Sed  // physical register has been deleted by two-address pass.
686193323Sed  end = start + 1;
687193323Sed
688193323Sedexit:
689193323Sed  assert(start < end && "did not find end of interval?");
690193323Sed
691193323Sed  // Already exists? Extend old live interval.
692193323Sed  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
693193323Sed  bool Extend = OldLR != interval.end();
694193323Sed  VNInfo *ValNo = Extend
695193323Sed    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
696193323Sed  if (MO.isEarlyClobber() && Extend)
697193323Sed    ValNo->redefByEC = true;
698193323Sed  LiveRange LR(start, end, ValNo);
699193323Sed  interval.addRange(LR);
700193323Sed  interval.addKill(LR.valno, end);
701193323Sed  DOUT << " +" << LR << '\n';
702193323Sed}
703193323Sed
704193323Sedvoid LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
705193323Sed                                      MachineBasicBlock::iterator MI,
706193323Sed                                      unsigned MIIdx,
707193323Sed                                      MachineOperand& MO,
708193323Sed                                      unsigned MOIdx) {
709193323Sed  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
710193323Sed    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
711193323Sed                             getOrCreateInterval(MO.getReg()));
712193323Sed  else if (allocatableRegs_[MO.getReg()]) {
713193323Sed    MachineInstr *CopyMI = NULL;
714193323Sed    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
715193323Sed    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
716193323Sed        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
717193323Sed        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
718193323Sed        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
719193323Sed      CopyMI = MI;
720193323Sed    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
721193323Sed                              getOrCreateInterval(MO.getReg()), CopyMI);
722193323Sed    // Def of a register also defines its sub-registers.
723193323Sed    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
724193323Sed      // If MI also modifies the sub-register explicitly, avoid processing it
725193323Sed      // more than once. Do not pass in TRI here so it checks for exact match.
726193323Sed      if (!MI->modifiesRegister(*AS))
727193323Sed        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
728193323Sed                                  getOrCreateInterval(*AS), 0);
729193323Sed  }
730193323Sed}
731193323Sed
732193323Sedvoid LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
733193323Sed                                         unsigned MIIdx,
734193323Sed                                         LiveInterval &interval, bool isAlias) {
735193323Sed  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
736193323Sed
737193323Sed  // Look for kills, if it reaches a def before it's killed, then it shouldn't
738193323Sed  // be considered a livein.
739193323Sed  MachineBasicBlock::iterator mi = MBB->begin();
740193323Sed  unsigned baseIndex = MIIdx;
741193323Sed  unsigned start = baseIndex;
742193323Sed  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
743193323Sed         getInstructionFromIndex(baseIndex) == 0)
744193323Sed    baseIndex += InstrSlots::NUM;
745193323Sed  unsigned end = baseIndex;
746193323Sed  bool SeenDefUse = false;
747193323Sed
748193323Sed  while (mi != MBB->end()) {
749193323Sed    if (mi->killsRegister(interval.reg, tri_)) {
750193323Sed      DOUT << " killed";
751193323Sed      end = getUseIndex(baseIndex) + 1;
752193323Sed      SeenDefUse = true;
753193323Sed      goto exit;
754193323Sed    } else if (mi->modifiesRegister(interval.reg, tri_)) {
755193323Sed      // Another instruction redefines the register before it is ever read.
756193323Sed      // Then the register is essentially dead at the instruction that defines
757193323Sed      // it. Hence its interval is:
758193323Sed      // [defSlot(def), defSlot(def)+1)
759193323Sed      DOUT << " dead";
760193323Sed      end = getDefIndex(start) + 1;
761193323Sed      SeenDefUse = true;
762193323Sed      goto exit;
763193323Sed    }
764193323Sed
765193323Sed    baseIndex += InstrSlots::NUM;
766193323Sed    ++mi;
767193323Sed    if (mi != MBB->end()) {
768193323Sed      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
769193323Sed             getInstructionFromIndex(baseIndex) == 0)
770193323Sed        baseIndex += InstrSlots::NUM;
771193323Sed    }
772193323Sed  }
773193323Sed
774193323Sedexit:
775193323Sed  // Live-in register might not be used at all.
776193323Sed  if (!SeenDefUse) {
777193323Sed    if (isAlias) {
778193323Sed      DOUT << " dead";
779193323Sed      end = getDefIndex(MIIdx) + 1;
780193323Sed    } else {
781193323Sed      DOUT << " live through";
782193323Sed      end = baseIndex;
783193323Sed    }
784193323Sed  }
785193323Sed
786193323Sed  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
787193323Sed  interval.addRange(LR);
788193323Sed  interval.addKill(LR.valno, end);
789193323Sed  DOUT << " +" << LR << '\n';
790193323Sed}
791193323Sed
792193323Sed/// computeIntervals - computes the live intervals for virtual
793193323Sed/// registers. for some ordering of the machine instructions [1,N] a
794193323Sed/// live interval is an interval [i, j) where 1 <= i <= j < N for
795193323Sed/// which a variable is live
796193323Sedvoid LiveIntervals::computeIntervals() {
797193323Sed
798193323Sed  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
799193323Sed       << "********** Function: "
800193323Sed       << ((Value*)mf_->getFunction())->getName() << '\n';
801193323Sed
802193323Sed  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
803193323Sed       MBBI != E; ++MBBI) {
804193323Sed    MachineBasicBlock *MBB = MBBI;
805193323Sed    // Track the index of the current machine instr.
806193323Sed    unsigned MIIndex = getMBBStartIdx(MBB);
807193323Sed    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
808193323Sed
809193323Sed    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
810193323Sed
811193323Sed    // Create intervals for live-ins to this BB first.
812193323Sed    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
813193323Sed           LE = MBB->livein_end(); LI != LE; ++LI) {
814193323Sed      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
815193323Sed      // Multiple live-ins can alias the same register.
816193323Sed      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
817193323Sed        if (!hasInterval(*AS))
818193323Sed          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
819193323Sed                               true);
820193323Sed    }
821193323Sed
822193323Sed    // Skip over empty initial indices.
823193323Sed    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
824193323Sed           getInstructionFromIndex(MIIndex) == 0)
825193323Sed      MIIndex += InstrSlots::NUM;
826193323Sed
827193323Sed    for (; MI != miEnd; ++MI) {
828193323Sed      DOUT << MIIndex << "\t" << *MI;
829193323Sed
830193323Sed      // Handle defs.
831193323Sed      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
832193323Sed        MachineOperand &MO = MI->getOperand(i);
833193323Sed        // handle register defs - build intervals
834193323Sed        if (MO.isReg() && MO.getReg() && MO.isDef()) {
835193323Sed          handleRegisterDef(MBB, MI, MIIndex, MO, i);
836193323Sed        }
837193323Sed      }
838193323Sed
839193323Sed      // Skip over the empty slots after each instruction.
840193323Sed      unsigned Slots = MI->getDesc().getNumDefs();
841193323Sed      if (Slots == 0)
842193323Sed        Slots = 1;
843193323Sed      MIIndex += InstrSlots::NUM * Slots;
844193323Sed
845193323Sed      // Skip over empty indices.
846193323Sed      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
847193323Sed             getInstructionFromIndex(MIIndex) == 0)
848193323Sed        MIIndex += InstrSlots::NUM;
849193323Sed    }
850193323Sed  }
851193323Sed}
852193323Sed
853193323Sedbool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
854193323Sed                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
855193323Sed  std::vector<IdxMBBPair>::const_iterator I =
856193323Sed    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
857193323Sed
858193323Sed  bool ResVal = false;
859193323Sed  while (I != Idx2MBBMap.end()) {
860193323Sed    if (I->first >= End)
861193323Sed      break;
862193323Sed    MBBs.push_back(I->second);
863193323Sed    ResVal = true;
864193323Sed    ++I;
865193323Sed  }
866193323Sed  return ResVal;
867193323Sed}
868193323Sed
869193323Sedbool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
870193323Sed                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
871193323Sed  std::vector<IdxMBBPair>::const_iterator I =
872193323Sed    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
873193323Sed
874193323Sed  bool ResVal = false;
875193323Sed  while (I != Idx2MBBMap.end()) {
876193323Sed    if (I->first > End)
877193323Sed      break;
878193323Sed    MachineBasicBlock *MBB = I->second;
879193323Sed    if (getMBBEndIdx(MBB) > End)
880193323Sed      break;
881193323Sed    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
882193323Sed           SE = MBB->succ_end(); SI != SE; ++SI)
883193323Sed      MBBs.push_back(*SI);
884193323Sed    ResVal = true;
885193323Sed    ++I;
886193323Sed  }
887193323Sed  return ResVal;
888193323Sed}
889193323Sed
890193323SedLiveInterval* LiveIntervals::createInterval(unsigned reg) {
891193323Sed  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
892193323Sed  return new LiveInterval(reg, Weight);
893193323Sed}
894193323Sed
895193323Sed/// dupInterval - Duplicate a live interval. The caller is responsible for
896193323Sed/// managing the allocated memory.
897193323SedLiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
898193323Sed  LiveInterval *NewLI = createInterval(li->reg);
899193323Sed  NewLI->Copy(*li, getVNInfoAllocator());
900193323Sed  return NewLI;
901193323Sed}
902193323Sed
903193323Sed/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
904193323Sed/// copy field and returns the source register that defines it.
905193323Sedunsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
906193323Sed  if (!VNI->copy)
907193323Sed    return 0;
908193323Sed
909193323Sed  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
910193323Sed    // If it's extracting out of a physical register, return the sub-register.
911193323Sed    unsigned Reg = VNI->copy->getOperand(1).getReg();
912193323Sed    if (TargetRegisterInfo::isPhysicalRegister(Reg))
913193323Sed      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
914193323Sed    return Reg;
915193323Sed  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
916193323Sed             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
917193323Sed    return VNI->copy->getOperand(2).getReg();
918193323Sed
919193323Sed  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
920193323Sed  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
921193323Sed    return SrcReg;
922193323Sed  assert(0 && "Unrecognized copy instruction!");
923193323Sed  return 0;
924193323Sed}
925193323Sed
926193323Sed//===----------------------------------------------------------------------===//
927193323Sed// Register allocator hooks.
928193323Sed//
929193323Sed
930193323Sed/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
931193323Sed/// allow one) virtual register operand, then its uses are implicitly using
932193323Sed/// the register. Returns the virtual register.
933193323Sedunsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
934193323Sed                                            MachineInstr *MI) const {
935193323Sed  unsigned RegOp = 0;
936193323Sed  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
937193323Sed    MachineOperand &MO = MI->getOperand(i);
938193323Sed    if (!MO.isReg() || !MO.isUse())
939193323Sed      continue;
940193323Sed    unsigned Reg = MO.getReg();
941193323Sed    if (Reg == 0 || Reg == li.reg)
942193323Sed      continue;
943193323Sed    // FIXME: For now, only remat MI with at most one register operand.
944193323Sed    assert(!RegOp &&
945193323Sed           "Can't rematerialize instruction with multiple register operand!");
946193323Sed    RegOp = MO.getReg();
947193323Sed#ifndef NDEBUG
948193323Sed    break;
949193323Sed#endif
950193323Sed  }
951193323Sed  return RegOp;
952193323Sed}
953193323Sed
954193323Sed/// isValNoAvailableAt - Return true if the val# of the specified interval
955193323Sed/// which reaches the given instruction also reaches the specified use index.
956193323Sedbool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
957193323Sed                                       unsigned UseIdx) const {
958193323Sed  unsigned Index = getInstructionIndex(MI);
959193323Sed  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
960193323Sed  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
961193323Sed  return UI != li.end() && UI->valno == ValNo;
962193323Sed}
963193323Sed
964193323Sed/// isReMaterializable - Returns true if the definition MI of the specified
965193323Sed/// val# of the specified interval is re-materializable.
966193323Sedbool LiveIntervals::isReMaterializable(const LiveInterval &li,
967193323Sed                                       const VNInfo *ValNo, MachineInstr *MI,
968193323Sed                                       SmallVectorImpl<LiveInterval*> &SpillIs,
969193323Sed                                       bool &isLoad) {
970193323Sed  if (DisableReMat)
971193323Sed    return false;
972193323Sed
973193323Sed  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
974193323Sed    return true;
975193323Sed
976193323Sed  int FrameIdx = 0;
977193323Sed  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
978193323Sed      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
979193323Sed    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
980193323Sed    // this but remember this is not safe to fold into a two-address
981193323Sed    // instruction.
982193323Sed    // This is a load from fixed stack slot. It can be rematerialized.
983193323Sed    return true;
984193323Sed
985193323Sed  // If the target-specific rules don't identify an instruction as
986193323Sed  // being trivially rematerializable, use some target-independent
987193323Sed  // rules.
988193323Sed  if (!MI->getDesc().isRematerializable() ||
989193323Sed      !tii_->isTriviallyReMaterializable(MI)) {
990193323Sed    if (!EnableAggressiveRemat)
991193323Sed      return false;
992193323Sed
993193323Sed    // If the instruction accesses memory but the memoperands have been lost,
994193323Sed    // we can't analyze it.
995193323Sed    const TargetInstrDesc &TID = MI->getDesc();
996193323Sed    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
997193323Sed      return false;
998193323Sed
999193323Sed    // Avoid instructions obviously unsafe for remat.
1000193323Sed    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1001193323Sed      return false;
1002193323Sed
1003193323Sed    // If the instruction accesses memory and the memory could be non-constant,
1004193323Sed    // assume the instruction is not rematerializable.
1005193323Sed    for (std::list<MachineMemOperand>::const_iterator
1006193323Sed           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1007193323Sed      const MachineMemOperand &MMO = *I;
1008193323Sed      if (MMO.isVolatile() || MMO.isStore())
1009193323Sed        return false;
1010193323Sed      const Value *V = MMO.getValue();
1011193323Sed      if (!V)
1012193323Sed        return false;
1013193323Sed      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1014193323Sed        if (!PSV->isConstant(mf_->getFrameInfo()))
1015193323Sed          return false;
1016193323Sed      } else if (!aa_->pointsToConstantMemory(V))
1017193323Sed        return false;
1018193323Sed    }
1019193323Sed
1020193323Sed    // If any of the registers accessed are non-constant, conservatively assume
1021193323Sed    // the instruction is not rematerializable.
1022193323Sed    unsigned ImpUse = 0;
1023193323Sed    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1024193323Sed      const MachineOperand &MO = MI->getOperand(i);
1025193323Sed      if (MO.isReg()) {
1026193323Sed        unsigned Reg = MO.getReg();
1027193323Sed        if (Reg == 0)
1028193323Sed          continue;
1029193323Sed        if (TargetRegisterInfo::isPhysicalRegister(Reg))
1030193323Sed          return false;
1031193323Sed
1032193323Sed        // Only allow one def, and that in the first operand.
1033193323Sed        if (MO.isDef() != (i == 0))
1034193323Sed          return false;
1035193323Sed
1036193323Sed        // Only allow constant-valued registers.
1037193323Sed        bool IsLiveIn = mri_->isLiveIn(Reg);
1038193323Sed        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1039193323Sed                                          E = mri_->def_end();
1040193323Sed
1041193323Sed        // For the def, it should be the only def of that register.
1042193323Sed        if (MO.isDef() && (next(I) != E || IsLiveIn))
1043193323Sed          return false;
1044193323Sed
1045193323Sed        if (MO.isUse()) {
1046193323Sed          // Only allow one use other register use, as that's all the
1047193323Sed          // remat mechanisms support currently.
1048193323Sed          if (Reg != li.reg) {
1049193323Sed            if (ImpUse == 0)
1050193323Sed              ImpUse = Reg;
1051193323Sed            else if (Reg != ImpUse)
1052193323Sed              return false;
1053193323Sed          }
1054193323Sed          // For the use, there should be only one associated def.
1055193323Sed          if (I != E && (next(I) != E || IsLiveIn))
1056193323Sed            return false;
1057193323Sed        }
1058193323Sed      }
1059193323Sed    }
1060193323Sed  }
1061193323Sed
1062193323Sed  unsigned ImpUse = getReMatImplicitUse(li, MI);
1063193323Sed  if (ImpUse) {
1064193323Sed    const LiveInterval &ImpLi = getInterval(ImpUse);
1065193323Sed    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1066193323Sed           re = mri_->use_end(); ri != re; ++ri) {
1067193323Sed      MachineInstr *UseMI = &*ri;
1068193323Sed      unsigned UseIdx = getInstructionIndex(UseMI);
1069193323Sed      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1070193323Sed        continue;
1071193323Sed      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1072193323Sed        return false;
1073193323Sed    }
1074193323Sed
1075193323Sed    // If a register operand of the re-materialized instruction is going to
1076193323Sed    // be spilled next, then it's not legal to re-materialize this instruction.
1077193323Sed    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1078193323Sed      if (ImpUse == SpillIs[i]->reg)
1079193323Sed        return false;
1080193323Sed  }
1081193323Sed  return true;
1082193323Sed}
1083193323Sed
1084193323Sed/// isReMaterializable - Returns true if the definition MI of the specified
1085193323Sed/// val# of the specified interval is re-materializable.
1086193323Sedbool LiveIntervals::isReMaterializable(const LiveInterval &li,
1087193323Sed                                       const VNInfo *ValNo, MachineInstr *MI) {
1088193323Sed  SmallVector<LiveInterval*, 4> Dummy1;
1089193323Sed  bool Dummy2;
1090193323Sed  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1091193323Sed}
1092193323Sed
1093193323Sed/// isReMaterializable - Returns true if every definition of MI of every
1094193323Sed/// val# of the specified interval is re-materializable.
1095193323Sedbool LiveIntervals::isReMaterializable(const LiveInterval &li,
1096193323Sed                                       SmallVectorImpl<LiveInterval*> &SpillIs,
1097193323Sed                                       bool &isLoad) {
1098193323Sed  isLoad = false;
1099193323Sed  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1100193323Sed       i != e; ++i) {
1101193323Sed    const VNInfo *VNI = *i;
1102193323Sed    unsigned DefIdx = VNI->def;
1103193323Sed    if (DefIdx == ~1U)
1104193323Sed      continue; // Dead val#.
1105193323Sed    // Is the def for the val# rematerializable?
1106193323Sed    if (DefIdx == ~0u)
1107193323Sed      return false;
1108193323Sed    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
1109193323Sed    bool DefIsLoad = false;
1110193323Sed    if (!ReMatDefMI ||
1111193323Sed        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1112193323Sed      return false;
1113193323Sed    isLoad |= DefIsLoad;
1114193323Sed  }
1115193323Sed  return true;
1116193323Sed}
1117193323Sed
1118193323Sed/// FilterFoldedOps - Filter out two-address use operands. Return
1119193323Sed/// true if it finds any issue with the operands that ought to prevent
1120193323Sed/// folding.
1121193323Sedstatic bool FilterFoldedOps(MachineInstr *MI,
1122193323Sed                            SmallVector<unsigned, 2> &Ops,
1123193323Sed                            unsigned &MRInfo,
1124193323Sed                            SmallVector<unsigned, 2> &FoldOps) {
1125193323Sed  MRInfo = 0;
1126193323Sed  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1127193323Sed    unsigned OpIdx = Ops[i];
1128193323Sed    MachineOperand &MO = MI->getOperand(OpIdx);
1129193323Sed    // FIXME: fold subreg use.
1130193323Sed    if (MO.getSubReg())
1131193323Sed      return true;
1132193323Sed    if (MO.isDef())
1133193323Sed      MRInfo |= (unsigned)VirtRegMap::isMod;
1134193323Sed    else {
1135193323Sed      // Filter out two-address use operand(s).
1136193323Sed      if (MI->isRegTiedToDefOperand(OpIdx)) {
1137193323Sed        MRInfo = VirtRegMap::isModRef;
1138193323Sed        continue;
1139193323Sed      }
1140193323Sed      MRInfo |= (unsigned)VirtRegMap::isRef;
1141193323Sed    }
1142193323Sed    FoldOps.push_back(OpIdx);
1143193323Sed  }
1144193323Sed  return false;
1145193323Sed}
1146193323Sed
1147193323Sed
1148193323Sed/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1149193323Sed/// slot / to reg or any rematerialized load into ith operand of specified
1150193323Sed/// MI. If it is successul, MI is updated with the newly created MI and
1151193323Sed/// returns true.
1152193323Sedbool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1153193323Sed                                         VirtRegMap &vrm, MachineInstr *DefMI,
1154193323Sed                                         unsigned InstrIdx,
1155193323Sed                                         SmallVector<unsigned, 2> &Ops,
1156193323Sed                                         bool isSS, int Slot, unsigned Reg) {
1157193323Sed  // If it is an implicit def instruction, just delete it.
1158193323Sed  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1159193323Sed    RemoveMachineInstrFromMaps(MI);
1160193323Sed    vrm.RemoveMachineInstrFromMaps(MI);
1161193323Sed    MI->eraseFromParent();
1162193323Sed    ++numFolds;
1163193323Sed    return true;
1164193323Sed  }
1165193323Sed
1166193323Sed  // Filter the list of operand indexes that are to be folded. Abort if
1167193323Sed  // any operand will prevent folding.
1168193323Sed  unsigned MRInfo = 0;
1169193323Sed  SmallVector<unsigned, 2> FoldOps;
1170193323Sed  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1171193323Sed    return false;
1172193323Sed
1173193323Sed  // The only time it's safe to fold into a two address instruction is when
1174193323Sed  // it's folding reload and spill from / into a spill stack slot.
1175193323Sed  if (DefMI && (MRInfo & VirtRegMap::isMod))
1176193323Sed    return false;
1177193323Sed
1178193323Sed  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1179193323Sed                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1180193323Sed  if (fmi) {
1181193323Sed    // Remember this instruction uses the spill slot.
1182193323Sed    if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1183193323Sed
1184193323Sed    // Attempt to fold the memory reference into the instruction. If
1185193323Sed    // we can do this, we don't need to insert spill code.
1186193323Sed    MachineBasicBlock &MBB = *MI->getParent();
1187193323Sed    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1188193323Sed      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1189193323Sed    vrm.transferSpillPts(MI, fmi);
1190193323Sed    vrm.transferRestorePts(MI, fmi);
1191193323Sed    vrm.transferEmergencySpills(MI, fmi);
1192193323Sed    mi2iMap_.erase(MI);
1193193323Sed    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1194193323Sed    mi2iMap_[fmi] = InstrIdx;
1195193323Sed    MI = MBB.insert(MBB.erase(MI), fmi);
1196193323Sed    ++numFolds;
1197193323Sed    return true;
1198193323Sed  }
1199193323Sed  return false;
1200193323Sed}
1201193323Sed
1202193323Sed/// canFoldMemoryOperand - Returns true if the specified load / store
1203193323Sed/// folding is possible.
1204193323Sedbool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1205193323Sed                                         SmallVector<unsigned, 2> &Ops,
1206193323Sed                                         bool ReMat) const {
1207193323Sed  // Filter the list of operand indexes that are to be folded. Abort if
1208193323Sed  // any operand will prevent folding.
1209193323Sed  unsigned MRInfo = 0;
1210193323Sed  SmallVector<unsigned, 2> FoldOps;
1211193323Sed  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1212193323Sed    return false;
1213193323Sed
1214193323Sed  // It's only legal to remat for a use, not a def.
1215193323Sed  if (ReMat && (MRInfo & VirtRegMap::isMod))
1216193323Sed    return false;
1217193323Sed
1218193323Sed  return tii_->canFoldMemoryOperand(MI, FoldOps);
1219193323Sed}
1220193323Sed
1221193323Sedbool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1222193323Sed  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1223193323Sed  for (LiveInterval::Ranges::const_iterator
1224193323Sed         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1225193323Sed    std::vector<IdxMBBPair>::const_iterator II =
1226193323Sed      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1227193323Sed    if (II == Idx2MBBMap.end())
1228193323Sed      continue;
1229193323Sed    if (I->end > II->first)  // crossing a MBB.
1230193323Sed      return false;
1231193323Sed    MBBs.insert(II->second);
1232193323Sed    if (MBBs.size() > 1)
1233193323Sed      return false;
1234193323Sed  }
1235193323Sed  return true;
1236193323Sed}
1237193323Sed
1238193323Sed/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1239193323Sed/// interval on to-be re-materialized operands of MI) with new register.
1240193323Sedvoid LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1241193323Sed                                       MachineInstr *MI, unsigned NewVReg,
1242193323Sed                                       VirtRegMap &vrm) {
1243193323Sed  // There is an implicit use. That means one of the other operand is
1244193323Sed  // being remat'ed and the remat'ed instruction has li.reg as an
1245193323Sed  // use operand. Make sure we rewrite that as well.
1246193323Sed  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1247193323Sed    MachineOperand &MO = MI->getOperand(i);
1248193323Sed    if (!MO.isReg())
1249193323Sed      continue;
1250193323Sed    unsigned Reg = MO.getReg();
1251193323Sed    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1252193323Sed      continue;
1253193323Sed    if (!vrm.isReMaterialized(Reg))
1254193323Sed      continue;
1255193323Sed    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1256193323Sed    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1257193323Sed    if (UseMO)
1258193323Sed      UseMO->setReg(NewVReg);
1259193323Sed  }
1260193323Sed}
1261193323Sed
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites every operand of MI that refers to li.reg to a new virtual
/// register NewVReg (creating it on demand), optionally folding the
/// reload / rematerialized def into MI, and builds the live range of the
/// new interval around this instruction.
///
/// Key parameters (NOTE(review): inferred from the visible uses; confirm
/// against the header's declaration):
///  - VNI:            value number being spilled; indexes into ReMatIds.
///  - index:          instruction index of MI.
///  - ReMatOrigDefMI / ReMatDefMI: original / cloneable def when the value
///                    is rematerializable (DefIsReMat).
///  - Slot / LdSlot:  stack slot of the spill / of the load being remat'ed.
///  - NewVReg (in/out): reused across calls within a split range; 0 means
///                    "create a fresh vreg".
///  - HasDef/HasUse (out): whether MI defines / uses the spilled register
///                    after the rewrite (both false if the access was folded).
/// Returns true if every access in MI could be folded (or is foldable when
/// splitting), i.e. the caller may keep a low spill weight.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  // Note: getNumOperands() is re-evaluated each iteration on purpose —
  // folding below can rewrite MI and change its operand count.
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    // Ops collects every operand index in MI that refers to li.reg,
    // starting with the current one.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      HasUse = false;

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          // If folding deleted MI entirely it is no longer in the index
          // maps; nothing left to rewrite in this instruction.
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: the range covers the reload point up through the
        // use of this instruction.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // The def live range extends from the def slot to the store slot of
      // this instruction; a new value number starts here.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
1478193323Sedbool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1479193323Sed                                   const VNInfo *VNI,
1480193323Sed                                   MachineBasicBlock *MBB, unsigned Idx) const {
1481193323Sed  unsigned End = getMBBEndIdx(MBB);
1482193323Sed  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1483193323Sed    unsigned KillIdx = VNI->kills[j];
1484193323Sed    if (KillIdx > Idx && KillIdx < End)
1485193323Sed      return true;
1486193323Sed  }
1487193323Sed  return false;
1488193323Sed}
1489193323Sed
1490193323Sed/// RewriteInfo - Keep track of machine instrs that will be rewritten
1491193323Sed/// during spilling.
1492193323Sednamespace {
1493193323Sed  struct RewriteInfo {
1494193323Sed    unsigned Index;
1495193323Sed    MachineInstr *MI;
1496193323Sed    bool HasUse;
1497193323Sed    bool HasDef;
1498193323Sed    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1499193323Sed      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1500193323Sed  };
1501193323Sed
1502193323Sed  struct RewriteInfoCompare {
1503193323Sed    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1504193323Sed      return LHS.Index < RHS.Index;
1505193323Sed    }
1506193323Sed  };
1507193323Sed}
1508193323Sed
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// the live range *I, delegating the per-instruction work to
/// rewriteInstructionForSpills. When splitting (TrySplit), also records per-
/// basic-block spill points (SpillMBBs / SpillIdxes) and restore points
/// (RestoreMBBs / RestoreIdxes) so loads and stores can later be placed at
/// profitable locations, and updates each new interval's spill weight.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the range down/up to whole-instruction boundaries.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before inspecting: later rewrites edit this use list.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // If the def being rematerialized reads another virtual register, that
  // register must stay available; remember it so it can be pinned below.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // When splitting, reuse the vreg already created for this block so
      // defs and uses within one MBB share a single split interval.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artifically
        // extend the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Both false means the access was folded away (or erased) entirely.
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // Value lives past this def within the block: remember (or move
          // forward) the spill point for this vreg in this MBB.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
1722193323Sed
1723193323Sedbool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1724193323Sed                        BitVector &RestoreMBBs,
1725193323Sed                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1726193323Sed  if (!RestoreMBBs[Id])
1727193323Sed    return false;
1728193323Sed  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1729193323Sed  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1730193323Sed    if (Restores[i].index == index &&
1731193323Sed        Restores[i].vreg == vr &&
1732193323Sed        Restores[i].canFold)
1733193323Sed      return true;
1734193323Sed  return false;
1735193323Sed}
1736193323Sed
1737193323Sedvoid LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1738193323Sed                        BitVector &RestoreMBBs,
1739193323Sed                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1740193323Sed  if (!RestoreMBBs[Id])
1741193323Sed    return;
1742193323Sed  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1743193323Sed  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1744193323Sed    if (Restores[i].index == index && Restores[i].vreg)
1745193323Sed      Restores[i].index = -1;
1746193323Sed}
1747193323Sed
1748193323Sed/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1749193323Sed/// spilled and create empty intervals for their uses.
1750193323Sedvoid
1751193323SedLiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1752193323Sed                                    const TargetRegisterClass* rc,
1753193323Sed                                    std::vector<LiveInterval*> &NewLIs) {
1754193323Sed  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1755193323Sed         re = mri_->reg_end(); ri != re; ) {
1756193323Sed    MachineOperand &O = ri.getOperand();
1757193323Sed    MachineInstr *MI = &*ri;
1758193323Sed    ++ri;
1759193323Sed    if (O.isDef()) {
1760193323Sed      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1761193323Sed             "Register def was not rewritten?");
1762193323Sed      RemoveMachineInstrFromMaps(MI);
1763193323Sed      vrm.RemoveMachineInstrFromMaps(MI);
1764193323Sed      MI->eraseFromParent();
1765193323Sed    } else {
1766193323Sed      // This must be an use of an implicit_def so it's not part of the live
1767193323Sed      // interval. Create a new empty live interval for it.
1768193323Sed      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1769193323Sed      unsigned NewVReg = mri_->createVirtualRegister(rc);
1770193323Sed      vrm.grow();
1771193323Sed      vrm.setIsImplicitlyDefined(NewVReg);
1772193323Sed      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1773193323Sed      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1774193323Sed        MachineOperand &MO = MI->getOperand(i);
1775193323Sed        if (MO.isReg() && MO.getReg() == li.reg)
1776193323Sed          MO.setReg(NewVReg);
1777193323Sed      }
1778193323Sed    }
1779193323Sed  }
1780193323Sed}
1781193323Sed
/// addIntervalsForSpillsFast - A simplified spiller: for each def / use of
/// li.reg, either fold the stack slot access directly into the instruction
/// or rewrite the operands to a fresh vreg with a tiny interval around the
/// instruction, adding a reload before (for uses) and a store after (for
/// defs). Returns the list of newly created intervals.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Loop invariant: every instruction processed so far no longer refers to
  // li.reg (its operands were either folded away or rewritten to a new
  // vreg), so restarting from reg_begin below always makes progress.
  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    // Collect every operand index in MI that refers to li.reg, noting
    // whether the instruction reads and/or writes the register.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    // If the stack slot access cannot be folded into MI directly, insert an
    // explicit reload / spill around it via a fresh vreg.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        // The new vreg dies at this instruction; mark uses as kills.
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in  the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';
    }


    // Folding / rewriting removed MI from li.reg's use list; restart from
    // the head rather than trusting the (possibly invalidated) iterator.
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
1869193323Sed
1870193323Sedstd::vector<LiveInterval*> LiveIntervals::
1871193323SedaddIntervalsForSpills(const LiveInterval &li,
1872193323Sed                      SmallVectorImpl<LiveInterval*> &SpillIs,
1873193323Sed                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1874193323Sed
1875193323Sed  if (EnableFastSpilling)
1876193323Sed    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1877193323Sed
1878193323Sed  assert(li.weight != HUGE_VALF &&
1879193323Sed         "attempt to spill already spilled interval!");
1880193323Sed
1881193323Sed  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1882193323Sed  li.print(DOUT, tri_);
1883193323Sed  DOUT << '\n';
1884193323Sed
1885193323Sed  // Each bit specify whether a spill is required in the MBB.
1886193323Sed  BitVector SpillMBBs(mf_->getNumBlockIDs());
1887193323Sed  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1888193323Sed  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1889193323Sed  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1890193323Sed  DenseMap<unsigned,unsigned> MBBVRegsMap;
1891193323Sed  std::vector<LiveInterval*> NewLIs;
1892193323Sed  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1893193323Sed
1894193323Sed  unsigned NumValNums = li.getNumValNums();
1895193323Sed  SmallVector<MachineInstr*, 4> ReMatDefs;
1896193323Sed  ReMatDefs.resize(NumValNums, NULL);
1897193323Sed  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1898193323Sed  ReMatOrigDefs.resize(NumValNums, NULL);
1899193323Sed  SmallVector<int, 4> ReMatIds;
1900193323Sed  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1901193323Sed  BitVector ReMatDelete(NumValNums);
1902193323Sed  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1903193323Sed
1904193323Sed  // Spilling a split live interval. It cannot be split any further. Also,
1905193323Sed  // it's also guaranteed to be a single val# / range interval.
1906193323Sed  if (vrm.getPreSplitReg(li.reg)) {
1907193323Sed    vrm.setIsSplitFromReg(li.reg, 0);
1908193323Sed    // Unset the split kill marker on the last use.
1909193323Sed    unsigned KillIdx = vrm.getKillPoint(li.reg);
1910193323Sed    if (KillIdx) {
1911193323Sed      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1912193323Sed      assert(KillMI && "Last use disappeared?");
1913193323Sed      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1914193323Sed      assert(KillOp != -1 && "Last use disappeared?");
1915193323Sed      KillMI->getOperand(KillOp).setIsKill(false);
1916193323Sed    }
1917193323Sed    vrm.removeKillPoint(li.reg);
1918193323Sed    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1919193323Sed    Slot = vrm.getStackSlot(li.reg);
1920193323Sed    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1921193323Sed    MachineInstr *ReMatDefMI = DefIsReMat ?
1922193323Sed      vrm.getReMaterializedMI(li.reg) : NULL;
1923193323Sed    int LdSlot = 0;
1924193323Sed    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1925193323Sed    bool isLoad = isLoadSS ||
1926193323Sed      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1927193323Sed    bool IsFirstRange = true;
1928193323Sed    for (LiveInterval::Ranges::const_iterator
1929193323Sed           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1930193323Sed      // If this is a split live interval with multiple ranges, it means there
1931193323Sed      // are two-address instructions that re-defined the value. Only the
1932193323Sed      // first def can be rematerialized!
1933193323Sed      if (IsFirstRange) {
1934193323Sed        // Note ReMatOrigDefMI has already been deleted.
1935193323Sed        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1936193323Sed                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1937193323Sed                             false, vrm, rc, ReMatIds, loopInfo,
1938193323Sed                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1939193323Sed                             MBBVRegsMap, NewLIs);
1940193323Sed      } else {
1941193323Sed        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1942193323Sed                             Slot, 0, false, false, false,
1943193323Sed                             false, vrm, rc, ReMatIds, loopInfo,
1944193323Sed                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1945193323Sed                             MBBVRegsMap, NewLIs);
1946193323Sed      }
1947193323Sed      IsFirstRange = false;
1948193323Sed    }
1949193323Sed
1950193323Sed    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1951193323Sed    return NewLIs;
1952193323Sed  }
1953193323Sed
1954193323Sed  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1955193323Sed  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1956193323Sed    TrySplit = false;
1957193323Sed  if (TrySplit)
1958193323Sed    ++numSplits;
1959193323Sed  bool NeedStackSlot = false;
1960193323Sed  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1961193323Sed       i != e; ++i) {
1962193323Sed    const VNInfo *VNI = *i;
1963193323Sed    unsigned VN = VNI->id;
1964193323Sed    unsigned DefIdx = VNI->def;
1965193323Sed    if (DefIdx == ~1U)
1966193323Sed      continue; // Dead val#.
1967193323Sed    // Is the def for the val# rematerializable?
1968193323Sed    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1969193323Sed      ? 0 : getInstructionFromIndex(DefIdx);
1970193323Sed    bool dummy;
1971193323Sed    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1972193323Sed      // Remember how to remat the def of this val#.
1973193323Sed      ReMatOrigDefs[VN] = ReMatDefMI;
1974193323Sed      // Original def may be modified so we have to make a copy here.
1975193323Sed      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1976193323Sed      ClonedMIs.push_back(Clone);
1977193323Sed      ReMatDefs[VN] = Clone;
1978193323Sed
1979193323Sed      bool CanDelete = true;
1980193323Sed      if (VNI->hasPHIKill) {
1981193323Sed        // A kill is a phi node, not all of its uses can be rematerialized.
1982193323Sed        // It must not be deleted.
1983193323Sed        CanDelete = false;
1984193323Sed        // Need a stack slot if there is any live range where uses cannot be
1985193323Sed        // rematerialized.
1986193323Sed        NeedStackSlot = true;
1987193323Sed      }
1988193323Sed      if (CanDelete)
1989193323Sed        ReMatDelete.set(VN);
1990193323Sed    } else {
1991193323Sed      // Need a stack slot if there is any live range where uses cannot be
1992193323Sed      // rematerialized.
1993193323Sed      NeedStackSlot = true;
1994193323Sed    }
1995193323Sed  }
1996193323Sed
1997193323Sed  // One stack slot per live interval.
1998193323Sed  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1999193323Sed    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2000193323Sed      Slot = vrm.assignVirt2StackSlot(li.reg);
2001193323Sed
2002193323Sed    // This case only occurs when the prealloc splitter has already assigned
2003193323Sed    // a stack slot to this vreg.
2004193323Sed    else
2005193323Sed      Slot = vrm.getStackSlot(li.reg);
2006193323Sed  }
2007193323Sed
2008193323Sed  // Create new intervals and rewrite defs and uses.
2009193323Sed  for (LiveInterval::Ranges::const_iterator
2010193323Sed         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2011193323Sed    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2012193323Sed    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2013193323Sed    bool DefIsReMat = ReMatDefMI != NULL;
2014193323Sed    bool CanDelete = ReMatDelete[I->valno->id];
2015193323Sed    int LdSlot = 0;
2016193323Sed    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2017193323Sed    bool isLoad = isLoadSS ||
2018193323Sed      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2019193323Sed    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2020193323Sed                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2021193323Sed                               CanDelete, vrm, rc, ReMatIds, loopInfo,
2022193323Sed                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2023193323Sed                               MBBVRegsMap, NewLIs);
2024193323Sed  }
2025193323Sed
2026193323Sed  // Insert spills / restores if we are splitting.
2027193323Sed  if (!TrySplit) {
2028193323Sed    handleSpilledImpDefs(li, vrm, rc, NewLIs);
2029193323Sed    return NewLIs;
2030193323Sed  }
2031193323Sed
2032193323Sed  SmallPtrSet<LiveInterval*, 4> AddedKill;
2033193323Sed  SmallVector<unsigned, 2> Ops;
2034193323Sed  if (NeedStackSlot) {
2035193323Sed    int Id = SpillMBBs.find_first();
2036193323Sed    while (Id != -1) {
2037193323Sed      std::vector<SRInfo> &spills = SpillIdxes[Id];
2038193323Sed      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2039193323Sed        int index = spills[i].index;
2040193323Sed        unsigned VReg = spills[i].vreg;
2041193323Sed        LiveInterval &nI = getOrCreateInterval(VReg);
2042193323Sed        bool isReMat = vrm.isReMaterialized(VReg);
2043193323Sed        MachineInstr *MI = getInstructionFromIndex(index);
2044193323Sed        bool CanFold = false;
2045193323Sed        bool FoundUse = false;
2046193323Sed        Ops.clear();
2047193323Sed        if (spills[i].canFold) {
2048193323Sed          CanFold = true;
2049193323Sed          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2050193323Sed            MachineOperand &MO = MI->getOperand(j);
2051193323Sed            if (!MO.isReg() || MO.getReg() != VReg)
2052193323Sed              continue;
2053193323Sed
2054193323Sed            Ops.push_back(j);
2055193323Sed            if (MO.isDef())
2056193323Sed              continue;
2057193323Sed            if (isReMat ||
2058193323Sed                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2059193323Sed                                                RestoreMBBs, RestoreIdxes))) {
2060193323Sed              // MI has two-address uses of the same register. If the use
2061193323Sed              // isn't the first and only use in the BB, then we can't fold
2062193323Sed              // it. FIXME: Move this to rewriteInstructionsForSpills.
2063193323Sed              CanFold = false;
2064193323Sed              break;
2065193323Sed            }
2066193323Sed            FoundUse = true;
2067193323Sed          }
2068193323Sed        }
2069193323Sed        // Fold the store into the def if possible.
2070193323Sed        bool Folded = false;
2071193323Sed        if (CanFold && !Ops.empty()) {
2072193323Sed          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2073193323Sed            Folded = true;
2074193323Sed            if (FoundUse) {
2075193323Sed              // Also folded uses, do not issue a load.
2076193323Sed              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2077193323Sed              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2078193323Sed            }
2079193323Sed            nI.removeRange(getDefIndex(index), getStoreIndex(index));
2080193323Sed          }
2081193323Sed        }
2082193323Sed
2083193323Sed        // Otherwise tell the spiller to issue a spill.
2084193323Sed        if (!Folded) {
2085193323Sed          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2086193323Sed          bool isKill = LR->end == getStoreIndex(index);
2087193323Sed          if (!MI->registerDefIsDead(nI.reg))
2088193323Sed            // No need to spill a dead def.
2089193323Sed            vrm.addSpillPoint(VReg, isKill, MI);
2090193323Sed          if (isKill)
2091193323Sed            AddedKill.insert(&nI);
2092193323Sed        }
2093193323Sed      }
2094193323Sed      Id = SpillMBBs.find_next(Id);
2095193323Sed    }
2096193323Sed  }
2097193323Sed
2098193323Sed  int Id = RestoreMBBs.find_first();
2099193323Sed  while (Id != -1) {
2100193323Sed    std::vector<SRInfo> &restores = RestoreIdxes[Id];
2101193323Sed    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2102193323Sed      int index = restores[i].index;
2103193323Sed      if (index == -1)
2104193323Sed        continue;
2105193323Sed      unsigned VReg = restores[i].vreg;
2106193323Sed      LiveInterval &nI = getOrCreateInterval(VReg);
2107193323Sed      bool isReMat = vrm.isReMaterialized(VReg);
2108193323Sed      MachineInstr *MI = getInstructionFromIndex(index);
2109193323Sed      bool CanFold = false;
2110193323Sed      Ops.clear();
2111193323Sed      if (restores[i].canFold) {
2112193323Sed        CanFold = true;
2113193323Sed        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2114193323Sed          MachineOperand &MO = MI->getOperand(j);
2115193323Sed          if (!MO.isReg() || MO.getReg() != VReg)
2116193323Sed            continue;
2117193323Sed
2118193323Sed          if (MO.isDef()) {
2119193323Sed            // If this restore were to be folded, it would have been folded
2120193323Sed            // already.
2121193323Sed            CanFold = false;
2122193323Sed            break;
2123193323Sed          }
2124193323Sed          Ops.push_back(j);
2125193323Sed        }
2126193323Sed      }
2127193323Sed
2128193323Sed      // Fold the load into the use if possible.
2129193323Sed      bool Folded = false;
2130193323Sed      if (CanFold && !Ops.empty()) {
2131193323Sed        if (!isReMat)
2132193323Sed          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2133193323Sed        else {
2134193323Sed          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2135193323Sed          int LdSlot = 0;
2136193323Sed          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2137193323Sed          // If the rematerializable def is a load, also try to fold it.
2138193323Sed          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2139193323Sed            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2140193323Sed                                          Ops, isLoadSS, LdSlot, VReg);
2141193323Sed          if (!Folded) {
2142193323Sed            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2143193323Sed            if (ImpUse) {
2144193323Sed              // Re-matting an instruction with virtual register use. Add the
2145193323Sed              // register as an implicit use on the use MI and update the register
2146193323Sed              // interval's spill weight to HUGE_VALF to prevent it from being
2147193323Sed              // spilled.
2148193323Sed              LiveInterval &ImpLi = getInterval(ImpUse);
2149193323Sed              ImpLi.weight = HUGE_VALF;
2150193323Sed              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2151193323Sed            }
2152193323Sed          }
2153193323Sed        }
2154193323Sed      }
2155193323Sed      // If folding is not possible / failed, then tell the spiller to issue a
2156193323Sed      // load / rematerialization for us.
2157193323Sed      if (Folded)
2158193323Sed        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2159193323Sed      else
2160193323Sed        vrm.addRestorePoint(VReg, MI);
2161193323Sed    }
2162193323Sed    Id = RestoreMBBs.find_next(Id);
2163193323Sed  }
2164193323Sed
2165193323Sed  // Finalize intervals: add kills, finalize spill weights, and filter out
2166193323Sed  // dead intervals.
2167193323Sed  std::vector<LiveInterval*> RetNewLIs;
2168193323Sed  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2169193323Sed    LiveInterval *LI = NewLIs[i];
2170193323Sed    if (!LI->empty()) {
2171193323Sed      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2172193323Sed      if (!AddedKill.count(LI)) {
2173193323Sed        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2174193323Sed        unsigned LastUseIdx = getBaseIndex(LR->end);
2175193323Sed        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2176193323Sed        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2177193323Sed        assert(UseIdx != -1);
2178193323Sed        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2179193323Sed          LastUse->getOperand(UseIdx).setIsKill();
2180193323Sed          vrm.addKillPoint(LI->reg, LastUseIdx);
2181193323Sed        }
2182193323Sed      }
2183193323Sed      RetNewLIs.push_back(LI);
2184193323Sed    }
2185193323Sed  }
2186193323Sed
2187193323Sed  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2188193323Sed  return RetNewLIs;
2189193323Sed}
2190193323Sed
2191193323Sed/// hasAllocatableSuperReg - Return true if the specified physical register has
2192193323Sed/// any super register that's allocatable.
2193193323Sedbool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2194193323Sed  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2195193323Sed    if (allocatableRegs_[*AS] && hasInterval(*AS))
2196193323Sed      return true;
2197193323Sed  return false;
2198193323Sed}
2199193323Sed
2200193323Sed/// getRepresentativeReg - Find the largest super register of the specified
2201193323Sed/// physical register.
2202193323Sedunsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2203193323Sed  // Find the largest super-register that is allocatable.
2204193323Sed  unsigned BestReg = Reg;
2205193323Sed  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2206193323Sed    unsigned SuperReg = *AS;
2207193323Sed    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2208193323Sed      BestReg = SuperReg;
2209193323Sed      break;
2210193323Sed    }
2211193323Sed  }
2212193323Sed  return BestReg;
2213193323Sed}
2214193323Sed
2215193323Sed/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2216193323Sed/// specified interval that conflicts with the specified physical register.
2217193323Sedunsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2218193323Sed                                                   unsigned PhysReg) const {
2219193323Sed  unsigned NumConflicts = 0;
2220193323Sed  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2221193323Sed  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2222193323Sed         E = mri_->reg_end(); I != E; ++I) {
2223193323Sed    MachineOperand &O = I.getOperand();
2224193323Sed    MachineInstr *MI = O.getParent();
2225193323Sed    unsigned Index = getInstructionIndex(MI);
2226193323Sed    if (pli.liveAt(Index))
2227193323Sed      ++NumConflicts;
2228193323Sed  }
2229193323Sed  return NumConflicts;
2230193323Sed}
2231193323Sed
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
///
/// This is the emergency-spill path: for every instruction that uses or
/// defines li.reg while the physical register is live, the register
/// (actually its representative super-register) is spilled around that
/// instruction and its live range there is removed. If the range cannot be
/// cut cleanly, compilation is aborted — the allocator has run out of
/// registers.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  // An instruction may reference li.reg through several operands; process
  // each instruction only once.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      // The physical register is live across this instruction: record an
      // emergency spill point and carve its live range out around the
      // instruction's load..store slot span.
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        // The span straddles multiple live ranges; we cannot cut it, which
        // means allocation has genuinely failed. Inline asm with impossible
        // constraints is the common user-visible cause, so call it out.
        cerr << "Ran out of registers during register allocation!\n";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          cerr << "Please check your inline asm statement for invalid "
               << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }
      // Also trim the live ranges of SpillReg's sub-registers at this point,
      // since spilling the super-register frees them as well.
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}
2284193323Sed
2285193323SedLiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2286193323Sed                                                   MachineInstr* startInst) {
2287193323Sed  LiveInterval& Interval = getOrCreateInterval(reg);
2288193323Sed  VNInfo* VN = Interval.getNextValue(
2289193323Sed            getInstructionIndex(startInst) + InstrSlots::DEF,
2290193323Sed            startInst, getVNInfoAllocator());
2291193323Sed  VN->hasPHIKill = true;
2292193323Sed  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2293193323Sed  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2294193323Sed               getMBBEndIdx(startInst->getParent()) + 1, VN);
2295193323Sed  Interval.addRange(LR);
2296193323Sed
2297193323Sed  return LR;
2298193323Sed}
2299