LiveIntervalAnalysis.cpp (199481 -> 199511)
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/CodeGen/LiveVariables.h"
24#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineInstr.h"
26#include "llvm/CodeGen/MachineInstrBuilder.h"
27#include "llvm/CodeGen/MachineLoopInfo.h"
28#include "llvm/CodeGen/MachineMemOperand.h"
29#include "llvm/CodeGen/MachineRegisterInfo.h"
30#include "llvm/CodeGen/Passes.h"
31#include "llvm/CodeGen/ProcessImplicitDefs.h"
32#include "llvm/Target/TargetRegisterInfo.h"
33#include "llvm/Target/TargetInstrInfo.h"
34#include "llvm/Target/TargetMachine.h"
35#include "llvm/Target/TargetOptions.h"
36#include "llvm/Support/CommandLine.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/ErrorHandling.h"
39#include "llvm/Support/raw_ostream.h"
40#include "llvm/ADT/DepthFirstIterator.h"
41#include "llvm/ADT/SmallSet.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/ADT/STLExtras.h"
44#include <algorithm>
45#include <limits>
46#include <cmath>
47using namespace llvm;
48
49// Hidden options to help debugging.
50static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
52
53static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
55
56static cl::opt<bool> EarlyCoalescing("early-coalescing",
57 cl::init(false), cl::Hidden);
58
59static cl::opt<int> CoalescingLimit("early-coalescing-limit",
60 cl::init(-1), cl::Hidden);
61
62STATISTIC(numIntervals , "Number of original intervals");
63STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64STATISTIC(numSplits , "Number of intervals split");
65STATISTIC(numCoalescing, "Number of early coalescing performed");
66
67char LiveIntervals::ID = 0;
68static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
69
70void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
71 AU.setPreservesCFG();
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addPreserved<LiveVariables>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreservedID(MachineLoopInfoID);
77 AU.addPreservedID(MachineDominatorsID);
78
79 if (!StrongPHIElim) {
80 AU.addPreservedID(PHIEliminationID);
81 AU.addRequiredID(PHIEliminationID);
82 }
83
84 AU.addRequiredID(TwoAddressInstructionPassID);
85 AU.addPreserved<ProcessImplicitDefs>();
86 AU.addRequired<ProcessImplicitDefs>();
87 AU.addPreserved<SlotIndexes>();
88 AU.addRequiredTransitive<SlotIndexes>();
89 MachineFunctionPass::getAnalysisUsage(AU);
90}
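// Note: the requirements above mean intervals are computed on non-SSA
// machine code -- PHI nodes have already been lowered to copies and tied
// (two-address) operand constraints have been materialized.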
91
92void LiveIntervals::releaseMemory() {
93 // Free the live intervals themselves.
94 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
95 E = r2iMap_.end(); I != E; ++I)
96 delete I->second;
97
98 r2iMap_.clear();
99 phiJoinCopies.clear();
100
101 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
102 VNInfoAllocator.Reset();
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
105 CloneMIs.pop_back();
106 mf_->DeleteMachineInstr(MI);
107 }
108}
109
110/// runOnMachineFunction - Compute live intervals for the whole function.
111///
112bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
113 mf_ = &fn;
114 mri_ = &mf_->getRegInfo();
115 tm_ = &fn.getTarget();
116 tri_ = tm_->getRegisterInfo();
117 tii_ = tm_->getInstrInfo();
118 aa_ = &getAnalysis<AliasAnalysis>();
119 lv_ = &getAnalysis<LiveVariables>();
120 indexes_ = &getAnalysis<SlotIndexes>();
121 allocatableRegs_ = tri_->getAllocatableSet(fn);
122
123 computeIntervals();
124 performEarlyCoalescing();
125
126 numIntervals += getNumIntervals();
127
128 DEBUG(dump());
129 return true;
130}
131
132/// print - Implement the dump method.
133void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
134 OS << "********** INTERVALS **********\n";
135 for (const_iterator I = begin(), E = end(); I != E; ++I) {
136 I->second->print(OS, tri_);
137 OS << "\n";
138 }
139
140 printInstrs(OS);
141}
142
143void LiveIntervals::printInstrs(raw_ostream &OS) const {
144 OS << "********** MACHINEINSTRS **********\n";
145
146 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
147 mbbi != mbbe; ++mbbi) {
148 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
149 for (MachineBasicBlock::iterator mii = mbbi->begin(),
150 mie = mbbi->end(); mii != mie; ++mii) {
151 OS << getInstructionIndex(mii) << '\t' << *mii;
152 }
153 }
154}
155
156void LiveIntervals::dumpInstrs() const {
157 printInstrs(errs());
158}
159
160/// conflictsWithPhysRegDef - Returns true if the specified register
161/// is defined at any point within the specified interval.
162bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
163 VirtRegMap &vrm, unsigned reg) {
164 for (LiveInterval::Ranges::const_iterator
165 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
166 for (SlotIndex index = I->start.getBaseIndex(),
167 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
168 index != end;
169 index = index.getNextIndex()) {
170 // Skip deleted instructions.
171 while (index != end && !getInstructionFromIndex(index))
172 index = index.getNextIndex();
173 if (index == end) break;
174
175 MachineInstr *MI = getInstructionFromIndex(index);
176 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
177 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
178 if (SrcReg == li.reg || DstReg == li.reg)
179 continue;
180 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
181 MachineOperand& mop = MI->getOperand(i);
182 if (!mop.isReg())
183 continue;
184 unsigned PhysReg = mop.getReg();
185 if (PhysReg == 0 || PhysReg == li.reg)
186 continue;
187 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
188 if (!vrm.hasPhys(PhysReg))
189 continue;
190 PhysReg = vrm.getPhys(PhysReg);
191 }
192 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
193 return true;
194 }
195 }
196 }
197
198 return false;
199}
200
201/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
202/// it can check uses as well.
203bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
204 unsigned Reg, bool CheckUse,
205 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
206 for (LiveInterval::Ranges::const_iterator
207 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
208 for (SlotIndex index = I->start.getBaseIndex(),
209 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
210 index != end;
211 index = index.getNextIndex()) {
212 // Skip deleted instructions.
213 MachineInstr *MI = 0;
214 while (index != end) {
215 MI = getInstructionFromIndex(index);
216 if (MI)
217 break;
218 index = index.getNextIndex();
219 }
220 if (index == end) break;
221
222 if (JoinedCopies.count(MI))
223 continue;
224 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
225 MachineOperand& MO = MI->getOperand(i);
226 if (!MO.isReg())
227 continue;
228 if (MO.isUse() && !CheckUse)
229 continue;
230 unsigned PhysReg = MO.getReg();
231 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
232 continue;
233 if (tri_->isSubRegister(Reg, PhysReg))
234 return true;
235 }
236 }
237 }
238
239 return false;
240}
241
242#ifndef NDEBUG
243static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
244 if (TargetRegisterInfo::isPhysicalRegister(reg))
245 errs() << tri_->getName(reg);
246 else
247 errs() << "%reg" << reg;
248}
249#endif
250
251void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
252 MachineBasicBlock::iterator mi,
253 SlotIndex MIIdx,
254 MachineOperand& MO,
255 unsigned MOIdx,
256 LiveInterval &interval) {
257 DEBUG({
258 errs() << "\t\tregister: ";
259 printRegName(interval.reg, tri_);
260 });
261
262 // Virtual registers may be defined multiple times (due to phi
263 // elimination and 2-addr elimination). Much of what we do only has to be
264 // done once for the vreg. We use an empty interval to detect the first
265 // time we see a vreg.
266 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
267 if (interval.empty()) {
268 // Get the Idx of the defining instructions.
269 SlotIndex defIndex = MIIdx.getDefIndex();
270 // Earlyclobbers move back one, so that they overlap the live range
271 // of inputs.
272 if (MO.isEarlyClobber())
273 defIndex = MIIdx.getUseIndex();
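// (A SlotIndex here carries four consecutive slots per instruction --
// load, use, def, store -- so getUseIndex() is the slot just before
// getDefIndex(); placing an earlyclobber def on the use slot makes it
// overlap the instruction's inputs, as noted above.)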
274 VNInfo *ValNo;
275 MachineInstr *CopyMI = NULL;
276 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
277 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
278 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
279 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
280 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
281 CopyMI = mi;
282 // Earlyclobbers move back one.
283 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
284
285 assert(ValNo->id == 0 && "First value in interval is not 0?");
286
287 // Loop over all of the blocks that the vreg is defined in. There are
288 // two cases we have to handle here. The most common case is a vreg
289 // whose lifetime is contained within a basic block. In this case there
290 // will be a single kill, in MBB, which comes after the definition.
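// A minimal sketch of this first case (names hypothetical):
//   %v = def ...     <- defIndex
//   ... = use %v     <- killIdx, the lone kill in this block
// which produces the single range [defIndex, killIdx) added below.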
291 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
292 // FIXME: what about dead vars?
293 SlotIndex killIdx;
294 if (vi.Kills[0] != mi)
295 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
296 else
297 killIdx = defIndex.getStoreIndex();
298
299 // If the kill happens after the definition, we have an intra-block
300 // live range.
301 if (killIdx > defIndex) {
302 assert(vi.AliveBlocks.empty() &&
303 "Shouldn't be alive across any blocks!");
304 LiveRange LR(defIndex, killIdx, ValNo);
305 interval.addRange(LR);
306 DEBUG(errs() << " +" << LR << "\n");
307 ValNo->addKill(killIdx);
308 return;
309 }
310 }
311
312 // The other case we handle is when a virtual register lives to the end
313 // of the defining block, potentially live across some blocks, then is
314 // live into some number of blocks, but gets killed. Start by adding a
315 // range that goes from this definition to the end of the defining block.
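// Sketch (block names hypothetical): for a vreg defined in BB0, live
// through BB1, and killed in BB2, three pieces are added in turn:
//   [defIndex, end of BB0)        -- just below
//   [start of BB1, end of BB1)    -- the AliveBlocks loop
//   [start of BB2, killIdx)       -- the Kills loop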
316 LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
317 ValNo);
318 DEBUG(errs() << " +" << NewLR);
319 interval.addRange(NewLR);
320
321 // Iterate over all of the blocks that the variable is completely
322 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
323 // live interval.
324 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
325 E = vi.AliveBlocks.end(); I != E; ++I) {
326 LiveRange LR(
327 getMBBStartIdx(mf_->getBlockNumbered(*I)),
328 getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
329 ValNo);
330 interval.addRange(LR);
331 DEBUG(errs() << " +" << LR);
332 }
333
334 // Finally, this virtual register is live from the start of any killing
335 // block to the 'use' slot of the killing instruction.
336 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
337 MachineInstr *Kill = vi.Kills[i];
338 SlotIndex killIdx =
339 getInstructionIndex(Kill).getDefIndex();
340 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
341 interval.addRange(LR);
342 ValNo->addKill(killIdx);
343 DEBUG(errs() << " +" << LR);
344 }
345
346 } else {
347 // If this is the second time we see a virtual register definition, it
348 // must be due to phi elimination or two addr elimination. If this is
349 // the result of two address elimination, then the vreg is one of the
350 // def-and-use register operands.
351 if (mi->isRegTiedToUseOperand(MOIdx)) {
352 // If this is a two-address definition, then we have already processed
353 // the live range. The only problem is that we didn't realize there
354 // are actually two values in the live interval. Because of this we
355 // need to take the LiveRange that defines this register and split it
356 // into two values.
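// Sketch (operands hypothetical):
//   %v = COPY %x       <- originally the only value, #0
//   %v = ADD %v, ...   <- tied redef of %v
// Below, the copy's def is given a fresh value number and value #0 is
// re-pointed at the tied (2-addr) instruction.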
357 assert(interval.containsOneValue());
358 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
359 SlotIndex RedefIndex = MIIdx.getDefIndex();
360 if (MO.isEarlyClobber())
361 RedefIndex = MIIdx.getUseIndex();
362
363 const LiveRange *OldLR =
364 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
365 VNInfo *OldValNo = OldLR->valno;
366
367 // Delete the initial value, which should be short and continuous,
368 // because the 2-addr copy must be in the same MBB as the redef.
369 interval.removeRange(DefIndex, RedefIndex);
370
371 // Two-address vregs should always only be redefined once. This means
372 // that at this point, there should be exactly one value number in it.
373 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
374
375 // The new value number (#1) is defined by the instruction we claimed
376 // defined value #0.
377 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
378 false, // update at *
379 VNInfoAllocator);
380 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
381
382 // Value#0 is now defined by the 2-addr instruction.
383 OldValNo->def = RedefIndex;
384 OldValNo->setCopy(0);
385 if (MO.isEarlyClobber())
386 OldValNo->setHasRedefByEC(true);
387
388 // Add the new live interval which replaces the range for the input copy.
389 LiveRange LR(DefIndex, RedefIndex, ValNo);
390 DEBUG(errs() << " replace range with " << LR);
391 interval.addRange(LR);
392 ValNo->addKill(RedefIndex);
393
394 // If this redefinition is dead, we need to add a dummy unit live
395 // range covering the def slot.
396 if (MO.isDead())
397 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
398 OldValNo));
399
400 DEBUG({
401 errs() << " RESULT: ";
402 interval.print(errs(), tri_);
403 });
404 } else {
405 // Otherwise, this must be because of phi elimination. If this is the
406 // first redefinition of the vreg that we have seen, go back and change
407 // the live range in the PHI block to be a different value number.
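// Sketch: after PHI elimination the vreg is killed by a copy in the phi
// join block (recorded in phiJoinCopies below), so its live-in portion
// of that block is split off under a fresh value number marked as a PHI
// def -- no real instruction defines it there.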
408 if (interval.containsOneValue()) {
409 // Remove the old range that we now know has an incorrect number.
410 VNInfo *VNI = interval.getValNumInfo(0);
411 MachineInstr *Killer = vi.Kills[0];
412 phiJoinCopies.push_back(Killer);
413 SlotIndex Start = getMBBStartIdx(Killer->getParent());
414 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
415 DEBUG({
416 errs() << " Removing [" << Start << "," << End << "] from: ";
417 interval.print(errs(), tri_);
418 errs() << "\n";
419 });
420 interval.removeRange(Start, End);
421 assert(interval.ranges.size() == 1 &&
422 "Newly discovered PHI interval has >1 ranges.");
423 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
424 VNI->addKill(indexes_->getTerminatorGap(killMBB));
425 VNI->setHasPHIKill(true);
426 DEBUG({
427 errs() << " RESULT: ";
428 interval.print(errs(), tri_);
429 });
430
431 // Replace the interval with one of a NEW value number. Note that this
432 // value number isn't actually defined by an instruction, weird huh? :)
433 LiveRange LR(Start, End,
434 interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
435 0, false, VNInfoAllocator));
436 LR.valno->setIsPHIDef(true);
437 DEBUG(errs() << " replace range with " << LR);
438 interval.addRange(LR);
439 LR.valno->addKill(End);
440 DEBUG({
441 errs() << " RESULT: ";
442 interval.print(errs(), tri_);
443 });
444 }
445
446 // In the case of PHI elimination, each variable definition is only
447 // live until the end of the block. We've already taken care of the
448 // rest of the live range.
449 SlotIndex defIndex = MIIdx.getDefIndex();
450 if (MO.isEarlyClobber())
451 defIndex = MIIdx.getUseIndex();
452
453 VNInfo *ValNo;
454 MachineInstr *CopyMI = NULL;
455 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
456 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
458 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
459 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
460 CopyMI = mi;
461 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
462
463 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
464 LiveRange LR(defIndex, killIndex, ValNo);
465 interval.addRange(LR);
466 ValNo->addKill(indexes_->getTerminatorGap(mbb));
467 ValNo->setHasPHIKill(true);
468 DEBUG(errs() << " +" << LR);
469 }
470 }
471
472 DEBUG(errs() << '\n');
473}
474
475void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
476 MachineBasicBlock::iterator mi,
477 SlotIndex MIIdx,
478 MachineOperand& MO,
479 LiveInterval &interval,
480 MachineInstr *CopyMI) {
481 // A physical register cannot be live across basic blocks, so its
482 // lifetime must end somewhere in its defining basic block.
483 DEBUG({
484 errs() << "\t\tregister: ";
485 printRegName(interval.reg, tri_);
486 });
487
488 SlotIndex baseIndex = MIIdx;
489 SlotIndex start = baseIndex.getDefIndex();
490 // Earlyclobbers move back one.
491 if (MO.isEarlyClobber())
492 start = MIIdx.getUseIndex();
493 SlotIndex end = start;
494
495 // If it is not used after definition, it is considered dead at
496 // the instruction defining it. Hence its interval is:
497 // [defSlot(def), defSlot(def)+1)
498 // For earlyclobbers, the defSlot was pushed back one; the extra
499 // advance below compensates.
500 if (MO.isDead()) {
501 DEBUG(errs() << " dead");
502 end = start.getStoreIndex();
503 goto exit;
504 }
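// (getStoreIndex() is the slot immediately after the def slot, giving
// the unit range "[defSlot(def), defSlot(def)+1)" described above.)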
505
506 // If it is not dead on definition, it must be killed by a
507 // subsequent instruction. Hence its interval is:
508 // [defSlot(def), useSlot(kill)+1)
509 baseIndex = baseIndex.getNextIndex();
510 while (++mi != MBB->end()) {
511
512 if (getInstructionFromIndex(baseIndex) == 0)
513 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
514
515 if (mi->killsRegister(interval.reg, tri_)) {
516 DEBUG(errs() << " killed");
517 end = baseIndex.getDefIndex();
518 goto exit;
519 } else {
520 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
521 if (DefIdx != -1) {
522 if (mi->isRegTiedToUseOperand(DefIdx)) {
523 // Two-address instruction.
524 end = baseIndex.getDefIndex();
525 assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
526 "Two address instruction is an early clobber?");
527 } else {
528 // Another instruction redefines the register before it is ever read.
529 // Then the register is essentially dead at the instruction that defines
530 // it. Hence its interval is:
531 // [defSlot(def), defSlot(def)+1)
532 DEBUG(errs() << " dead");
533 end = start.getStoreIndex();
534 }
535 goto exit;
536 }
537 }
538
539 baseIndex = baseIndex.getNextIndex();
540 }
541
542 // The only case we should have a dead physreg here without a killing
543 // instruction where we know it's dead is if it is live-in to the function
544 // and never used. Another possible case is that the implicit use of the
545 // physical register has been deleted by the two-address pass.
546 end = start.getStoreIndex();
547
548exit:
549 assert(start < end && "did not find end of interval?");
550
551 // Already exists? Extend old live interval.
552 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
553 bool Extend = OldLR != interval.end();
554 VNInfo *ValNo = Extend
555 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
556 if (MO.isEarlyClobber() && Extend)
557 ValNo->setHasRedefByEC(true);
558 LiveRange LR(start, end, ValNo);
559 interval.addRange(LR);
560 LR.valno->addKill(end);
561 DEBUG(errs() << " +" << LR << '\n');
562}
563
564void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
565 MachineBasicBlock::iterator MI,
566 SlotIndex MIIdx,
567 MachineOperand& MO,
568 unsigned MOIdx) {
569 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
570 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
571 getOrCreateInterval(MO.getReg()));
572 else if (allocatableRegs_[MO.getReg()]) {
573 MachineInstr *CopyMI = NULL;
574 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
575 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
576 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
577 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
578 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
579 CopyMI = MI;
580 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
581 getOrCreateInterval(MO.getReg()), CopyMI);
582 // Def of a register also defines its sub-registers.
583 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
584 // If MI also modifies the sub-register explicitly, avoid processing it
585 // more than once. Do not pass in TRI here so it checks for exact match.
586 if (!MI->modifiesRegister(*AS))
587 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
588 getOrCreateInterval(*AS), 0);
589 }
590}
591
592void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
593 SlotIndex MIIdx,
594 LiveInterval &interval, bool isAlias) {
595 DEBUG({
596 errs() << "\t\tlivein register: ";
597 printRegName(interval.reg, tri_);
598 });
599
600 // Look for kills. If the register reaches a def before it's killed, then it
601 // shouldn't be considered a livein.
602 MachineBasicBlock::iterator mi = MBB->begin();
603 SlotIndex baseIndex = MIIdx;
604 SlotIndex start = baseIndex;
605 if (getInstructionFromIndex(baseIndex) == 0)
606 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
607
608 SlotIndex end = baseIndex;
609 bool SeenDefUse = false;
610
611 while (mi != MBB->end()) {
612 if (mi->killsRegister(interval.reg, tri_)) {
613 DEBUG(errs() << " killed");
614 end = baseIndex.getDefIndex();
615 SeenDefUse = true;
616 break;
617 } else if (mi->modifiesRegister(interval.reg, tri_)) {
618 // Another instruction redefines the register before it is ever read.
619 // Then the register is essentially dead at the instruction that defines
620 // it. Hence its interval is:
621 // [defSlot(def), defSlot(def)+1)
622 DEBUG(errs() << " dead");
623 end = start.getStoreIndex();
624 SeenDefUse = true;
625 break;
626 }
627
628 ++mi;
629 if (mi != MBB->end()) {
630 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
631 }
632 }
633
634 // Live-in register might not be used at all.
635 if (!SeenDefUse) {
636 if (isAlias) {
637 DEBUG(errs() << " dead");
638 end = MIIdx.getStoreIndex();
639 } else {
640 DEBUG(errs() << " live through");
641 end = baseIndex;
642 }
643 }
644
645 VNInfo *vni =
646 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
647 0, false, VNInfoAllocator);
648 vni->setIsPHIDef(true);
649 LiveRange LR(start, end, vni);
650
651 interval.addRange(LR);
652 LR.valno->addKill(end);
653 DEBUG(errs() << " +" << LR << '\n');
654}
655
656bool LiveIntervals::
657isSafeAndProfitableToCoalesce(LiveInterval &DstInt,
658 LiveInterval &SrcInt,
659 SmallVector<MachineInstr*,16> &IdentCopies,
660 SmallVector<MachineInstr*,16> &OtherCopies) {
661 unsigned NumIdent = 0;
662 for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
663 re = mri_->def_end(); ri != re; ++ri) {
664 MachineInstr *MI = &*ri;
665 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
666 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
667 return false;
668 if (SrcReg != DstInt.reg) {
669 // Non-identity copy - we cannot handle overlapping intervals
670 if (DstInt.liveAt(getInstructionIndex(MI)))
671 return false;
672 OtherCopies.push_back(MI);
673 } else {
674 IdentCopies.push_back(MI);
675 ++NumIdent;
676 }
677 }
678
679 return IdentCopies.size() > OtherCopies.size();
680}
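// In other words, coalescing is attempted only when more of SrcInt's
// defining copies become deletable identity copies than would have to
// be retargeted.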
681
682void LiveIntervals::performEarlyCoalescing() {
683 if (!EarlyCoalescing)
684 return;
685
686 /// Perform early coalescing: eliminate copies which feed into phi joins
687 /// and whose sources are defined by the phi joins.
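// Illustrative shape (register names hypothetical):
//   bb.pred:  %src = COPY %dst   <- becomes an identity copy
//   bb.join:  %dst = COPY %src   <- the phi join itself
// Identity copies are deleted outright; the remaining copies into %src
// are retargeted to define %dst directly.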
688 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
689 MachineInstr *Join = phiJoinCopies[i];
690 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
691 break;
692
693 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
694 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
695#ifndef NDEBUG
696 assert(isMove && "PHI join instruction must be a move!");
697#else
698 isMove = isMove; // Silence compiler warning.
699#endif
700
701 LiveInterval &DstInt = getInterval(PHIDst);
702 LiveInterval &SrcInt = getInterval(PHISrc);
703 SmallVector<MachineInstr*, 16> IdentCopies;
704 SmallVector<MachineInstr*, 16> OtherCopies;
705 if (!isSafeAndProfitableToCoalesce(DstInt, SrcInt,
706 IdentCopies, OtherCopies))
707 continue;
708
709 DEBUG(errs() << "PHI Join: " << *Join);
710 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
711 assert(std::distance(mri_->use_begin(PHISrc), mri_->use_end()) == 1 &&
712 "PHI join src should not be used elsewhere");
713 VNInfo *VNI = DstInt.getValNumInfo(0);
714
715 // Change the non-identity copies to directly target the phi destination.
716 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
717 MachineInstr *PHICopy = OtherCopies[i];
718 SlotIndex MIIndex = getInstructionIndex(PHICopy);
719 DEBUG(errs() << "Moving: " << MIIndex << ' ' << *PHICopy);
720 SlotIndex DefIndex = MIIndex.getDefIndex();
721 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
722 SlotIndex StartIndex = SLR->start;
723 SlotIndex EndIndex = SLR->end;
724
725 // Delete the val# defined by the now-identity copy and add the range from
726 // beginning of the mbb to the end of the range.
727 SrcInt.removeValNo(SLR->valno);
728 DEBUG(errs() << " added range [" << StartIndex << ','
729 << EndIndex << "] to reg" << DstInt.reg << '\n');
730 assert (!DstInt.liveAt(StartIndex) && "Cannot coalesce when dst live!");
731 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
732 VNInfoAllocator);
733 NewVNI->setHasPHIKill(true);
734 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
735 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
736 MachineOperand &MO = PHICopy->getOperand(j);
737 if (!MO.isReg() || MO.getReg() != PHISrc)
738 continue;
739 MO.setReg(PHIDst);
740 }
741 }
742
743 // Now let's eliminate all the would-be identity copies.
744 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
745 MachineInstr *PHICopy = IdentCopies[i];
746 DEBUG(errs() << "Coalescing: " << *PHICopy);
747
748 SlotIndex MIIndex = getInstructionIndex(PHICopy);
749 SlotIndex DefIndex = MIIndex.getDefIndex();
750 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
751 SlotIndex StartIndex = SLR->start;
752 SlotIndex EndIndex = SLR->end;
753
754 // Delete the val# defined by the now-identity copy and add the range from
755 // beginning of the mbb to the end of the range.
756 SrcInt.removeValNo(SLR->valno);
757 RemoveMachineInstrFromMaps(PHICopy);
758 PHICopy->eraseFromParent();
759 DEBUG(errs() << " added range [" << StartIndex << ','
760 << EndIndex << "] to reg" << DstInt.reg << '\n');
761 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
762 }
763
764 // Remove the phi join and update the phi block liveness.
765 SlotIndex MIIndex = getInstructionIndex(Join);
766 SlotIndex UseIndex = MIIndex.getUseIndex();
767 SlotIndex DefIndex = MIIndex.getDefIndex();
768 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
769 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
770 DLR->valno->setCopy(0);
771 DLR->valno->setIsDefAccurate(false);
772 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
773 SrcInt.removeRange(SLR->start, SLR->end);
774 assert(SrcInt.empty());
775 removeInterval(PHISrc);
776 RemoveMachineInstrFromMaps(Join);
777 Join->eraseFromParent();
778
779 ++numCoalescing;
780 }
781}
782
783/// computeIntervals - Computes the live intervals for virtual
784/// registers. For some ordering of the machine instructions [1,N], a
785/// live interval is an interval [i, j), where 1 <= i <= j < N, over
786/// which a variable is live.
787void LiveIntervals::computeIntervals() {
788 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
789 << "********** Function: "
790 << ((Value*)mf_->getFunction())->getName() << '\n');
791
792 SmallVector<unsigned, 8> UndefUses;
793 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
794 MBBI != E; ++MBBI) {
795 MachineBasicBlock *MBB = MBBI;
796 // Track the index of the current machine instr.
797 SlotIndex MIIndex = getMBBStartIdx(MBB);
798 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
799
800 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
801
802 // Create intervals for live-ins to this BB first.
803 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
804 LE = MBB->livein_end(); LI != LE; ++LI) {
805 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
806 // Multiple live-ins can alias the same register.
807 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
808 if (!hasInterval(*AS))
809 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
810 true);
811 }
812
813 // Skip over empty initial indices.
814 if (getInstructionFromIndex(MIIndex) == 0)
815 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
816
817 for (; MI != miEnd; ++MI) {
818 DEBUG(errs() << MIIndex << "\t" << *MI);
819
820 // Handle defs.
821 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
822 MachineOperand &MO = MI->getOperand(i);
823 if (!MO.isReg() || !MO.getReg())
824 continue;
825
826 // handle register defs - build intervals
827 if (MO.isDef())
828 handleRegisterDef(MBB, MI, MIIndex, MO, i);
829 else if (MO.isUndef())
830 UndefUses.push_back(MO.getReg());
831 }
832
833 // Move to the next instr slot.
834 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
835 }
836 }
837
838 // Create empty intervals for registers defined by implicit_def's (except
839 // for those implicit_defs that define values which are live out of their
840 // blocks).
841 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
842 unsigned UndefReg = UndefUses[i];
843 (void)getOrCreateInterval(UndefReg);
844 }
845}
846
847LiveInterval* LiveIntervals::createInterval(unsigned reg) {
848 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
849 return new LiveInterval(reg, Weight);
850}
851
852/// dupInterval - Duplicate a live interval. The caller is responsible for
853/// managing the allocated memory.
854LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
855 LiveInterval *NewLI = createInterval(li->reg);
856 NewLI->Copy(*li, mri_, getVNInfoAllocator());
857 return NewLI;
858}
859
860/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
861/// copy field and returns the source register that defines it.
862unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
863 if (!VNI->getCopy())
864 return 0;
865
866 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
867 // If it's extracting out of a physical register, return the sub-register.
868 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
869 if (TargetRegisterInfo::isPhysicalRegister(Reg))
870 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
871 return Reg;
872 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
873 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
874 return VNI->getCopy()->getOperand(2).getReg();
875
876 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
877 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
878 return SrcReg;
879 llvm_unreachable("Unrecognized copy instruction!");
880 return 0;
881}
882
883//===----------------------------------------------------------------------===//
884// Register allocator hooks.
885//
886
887/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
888/// allow one) virtual register operand, then its uses implicitly use that
889/// register. Returns the virtual register.
890unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
891 MachineInstr *MI) const {
892 unsigned RegOp = 0;
893 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
894 MachineOperand &MO = MI->getOperand(i);
895 if (!MO.isReg() || !MO.isUse())
896 continue;
897 unsigned Reg = MO.getReg();
898 if (Reg == 0 || Reg == li.reg)
899 continue;
900
901 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
902 !allocatableRegs_[Reg])
903 continue;
904 // FIXME: For now, only remat MI with at most one register operand.
905 assert(!RegOp &&
906 "Can't rematerialize instruction with multiple register operand!");
907 RegOp = MO.getReg();
908#ifdef NDEBUG
909 break; // With asserts disabled there is nothing more to check.
910#endif
911 }
912 return RegOp;
913}
914
915/// isValNoAvailableAt - Return true if the val# of the specified interval
916/// which reaches the given instruction also reaches the specified use index.
917bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
918 SlotIndex UseIdx) const {
919 SlotIndex Index = getInstructionIndex(MI);
920 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
921 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
922 return UI != li.end() && UI->valno == ValNo;
923}
924
925/// isReMaterializable - Returns true if the definition MI of the specified
926/// val# of the specified interval is re-materializable.
927bool LiveIntervals::isReMaterializable(const LiveInterval &li,
928 const VNInfo *ValNo, MachineInstr *MI,
929 SmallVectorImpl<LiveInterval*> &SpillIs,
930 bool &isLoad) {
931 if (DisableReMat)
932 return false;
933
934 if (!tii_->isTriviallyReMaterializable(MI, aa_))
935 return false;
936
937 // Target-specific code can mark an instruction as being rematerializable
938 // if it has one virtual reg use, though it had better be something like
939 // a PIC base register which is likely to be live everywhere.
940 unsigned ImpUse = getReMatImplicitUse(li, MI);
941 if (ImpUse) {
942 const LiveInterval &ImpLi = getInterval(ImpUse);
943 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
944 re = mri_->use_end(); ri != re; ++ri) {
945 MachineInstr *UseMI = &*ri;
946 SlotIndex UseIdx = getInstructionIndex(UseMI);
947 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
948 continue;
949 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
950 return false;
951 }
952
953 // If a register operand of the re-materialized instruction is going to
954 // be spilled next, then it's not legal to re-materialize this instruction.
955 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
956 if (ImpUse == SpillIs[i]->reg)
957 return false;
958 }
959 return true;
960}
961
962/// isReMaterializable - Returns true if the definition MI of the specified
963/// val# of the specified interval is re-materializable.
964bool LiveIntervals::isReMaterializable(const LiveInterval &li,
965 const VNInfo *ValNo, MachineInstr *MI) {
966 SmallVector<LiveInterval*, 4> Dummy1;
967 bool Dummy2;
968 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
969}
970
971/// isReMaterializable - Returns true if the definition MI of every
972/// val# of the specified interval is re-materializable.
973bool LiveIntervals::isReMaterializable(const LiveInterval &li,
974 SmallVectorImpl<LiveInterval*> &SpillIs,
975 bool &isLoad) {
976 isLoad = false;
977 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
978 i != e; ++i) {
979 const VNInfo *VNI = *i;
980 if (VNI->isUnused())
981 continue; // Dead val#.
982 // Is the def for the val# rematerializable?
983 if (!VNI->isDefAccurate())
984 return false;
985 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
986 bool DefIsLoad = false;
987 if (!ReMatDefMI ||
988 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
989 return false;
990 isLoad |= DefIsLoad;
991 }
992 return true;
993}
994
995/// FilterFoldedOps - Filter out two-address use operands. Return
996/// true if it finds any issue with the operands that ought to prevent
997/// folding.
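/// On a successful (false) return, MRInfo holds a VirtRegMap::ModRef mask
/// (isRef, isMod, or isModRef) describing how MI touches the register, and
/// FoldOps holds the operand indices that are candidates for folding.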
998static bool FilterFoldedOps(MachineInstr *MI,
999 SmallVector<unsigned, 2> &Ops,
1000 unsigned &MRInfo,
1001 SmallVector<unsigned, 2> &FoldOps) {
1002 MRInfo = 0;
1003 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1004 unsigned OpIdx = Ops[i];
1005 MachineOperand &MO = MI->getOperand(OpIdx);
1006 // FIXME: fold subreg use.
1007 if (MO.getSubReg())
1008 return true;
1009 if (MO.isDef())
1010 MRInfo |= (unsigned)VirtRegMap::isMod;
1011 else {
1012 // Filter out two-address use operand(s).
1013 if (MI->isRegTiedToDefOperand(OpIdx)) {
1014 MRInfo = VirtRegMap::isModRef;
1015 continue;
1016 }
1017 MRInfo |= (unsigned)VirtRegMap::isRef;
1018 }
1019 FoldOps.push_back(OpIdx);
1020 }
1021 return false;
1022}
1023
1024
1025/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1026/// slot / to reg or any rematerialized load into the ith operand of the
1027/// specified MI. If it is successful, MI is updated with the newly created
1028/// MI and true is returned.
1029bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1030 VirtRegMap &vrm, MachineInstr *DefMI,
1031 SlotIndex InstrIdx,
1032 SmallVector<unsigned, 2> &Ops,
1033 bool isSS, int Slot, unsigned Reg) {
1034 // If it is an implicit def instruction, just delete it.
1035 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1036 RemoveMachineInstrFromMaps(MI);
1037 vrm.RemoveMachineInstrFromMaps(MI);
1038 MI->eraseFromParent();
1039 ++numFolds;
1040 return true;
1041 }
1042
1043 // Filter the list of operand indexes that are to be folded. Abort if
1044 // any operand will prevent folding.
1045 unsigned MRInfo = 0;
1046 SmallVector<unsigned, 2> FoldOps;
1047 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1048 return false;
1049
1050 // The only time it's safe to fold into a two-address instruction is when
1051 // it's folding a reload or a spill from / into a spill stack slot.
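// For instance, a two-address instruction such as (illustrative):
//   %reg1024<def> = ADD32rr %reg1024<kill>, %reg1025
// both reads and writes %reg1024; folding a rematerialized def (DefMI)
// into it would drop the write, so only stack slot reload / spill
// folding is allowed here.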
1052 if (DefMI && (MRInfo & VirtRegMap::isMod))
1053 return false;
1054
1055 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1056 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1057 if (fmi) {
1058 // Remember this instruction uses the spill slot.
1059 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1060
1061 // Attempt to fold the memory reference into the instruction. If
1062 // we can do this, we don't need to insert spill code.
1063 MachineBasicBlock &MBB = *MI->getParent();
1064 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1065 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1066 vrm.transferSpillPts(MI, fmi);
1067 vrm.transferRestorePts(MI, fmi);
1068 vrm.transferEmergencySpills(MI, fmi);
1069 ReplaceMachineInstrInMaps(MI, fmi);
1070 MI = MBB.insert(MBB.erase(MI), fmi);
1071 ++numFolds;
1072 return true;
1073 }
1074 return false;
1075}
1076
1077/// canFoldMemoryOperand - Returns true if the specified load / store
1078/// folding is possible.
1079bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1080 SmallVector<unsigned, 2> &Ops,
1081 bool ReMat) const {
1082 // Filter the list of operand indexes that are to be folded. Abort if
1083 // any operand will prevent folding.
1084 unsigned MRInfo = 0;
1085 SmallVector<unsigned, 2> FoldOps;
1086 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1087 return false;
1088
1089 // It's only legal to remat for a use, not a def.
1090 if (ReMat && (MRInfo & VirtRegMap::isMod))
1091 return false;
1092
1093 return tii_->canFoldMemoryOperand(MI, FoldOps);
1094}
1095
1096bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1097 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
1098
1099 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
1100
1101 if (mbb == 0)
1102 return false;
1103
1104 for (++itr; itr != li.ranges.end(); ++itr) {
1105 MachineBasicBlock *mbb2 =
1106 indexes_->getMBBCoveringRange(itr->start, itr->end);
1107
1108 if (mbb2 != mbb)
1109 return false;
1110 }
1111
1112 return true;
1113}
1114
1115/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1116/// the interval's register by the to-be-rematerialized instruction) with the new register.
1117void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1118 MachineInstr *MI, unsigned NewVReg,
1119 VirtRegMap &vrm) {
1120 // There is an implicit use. That means one of the other operands is
1121 // being remat'ed and the remat'ed instruction has li.reg as a
1122 // use operand. Make sure we rewrite that as well.
1123 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1124 MachineOperand &MO = MI->getOperand(i);
1125 if (!MO.isReg())
1126 continue;
1127 unsigned Reg = MO.getReg();
1128 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1129 continue;
1130 if (!vrm.isReMaterialized(Reg))
1131 continue;
1132 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1133 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1134 if (UseMO)
1135 UseMO->setReg(NewVReg);
1136 }
1137}
1138
1139/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1140/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1141bool LiveIntervals::
1142rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1143 bool TrySplit, SlotIndex index, SlotIndex end,
1144 MachineInstr *MI,
1145 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1146 unsigned Slot, int LdSlot,
1147 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1148 VirtRegMap &vrm,
1149 const TargetRegisterClass* rc,
1150 SmallVector<int, 4> &ReMatIds,
1151 const MachineLoopInfo *loopInfo,
1152 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1153 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1154 std::vector<LiveInterval*> &NewLIs) {
1155 bool CanFold = false;
1156 RestartInstruction:
1157 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1158 MachineOperand& mop = MI->getOperand(i);
1159 if (!mop.isReg())
1160 continue;
1161 unsigned Reg = mop.getReg();
1162 unsigned RegI = Reg;
1163 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1164 continue;
1165 if (Reg != li.reg)
1166 continue;
1167
1168 bool TryFold = !DefIsReMat;
1169 bool FoldSS = true; // Default behavior unless it's a remat.
1170 int FoldSlot = Slot;
1171 if (DefIsReMat) {
1172 // If this is the rematerializable definition MI itself and
1173 // all of its uses are rematerialized, simply delete it.
1174 if (MI == ReMatOrigDefMI && CanDelete) {
1175 DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1176 << *MI << '\n');
1177 RemoveMachineInstrFromMaps(MI);
1178 vrm.RemoveMachineInstrFromMaps(MI);
1179 MI->eraseFromParent();
1180 break;
1181 }
1182
1183 // If def for this use can't be rematerialized, then try folding.
1184 // If def is rematerializable and it's a load, also try folding.
1185 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1186 if (isLoad) {
1187 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1188 FoldSS = isLoadSS;
1189 FoldSlot = LdSlot;
1190 }
1191 }
1192
1193 // Scan all of the operands of this instruction rewriting operands
1194 // to use NewVReg instead of li.reg as appropriate. We do this for
1195 // two reasons:
1196 //
1197 // 1. If the instr reads the same spilled vreg multiple times, we
1198 // want to reuse the NewVReg.
1199 // 2. If the instr is a two-addr instruction, we are required to
1200 // keep the src/dst regs pinned.
1201 //
1202 // Keep track of whether we replace a use and/or def so that we can
1203 // create the spill interval with the appropriate range.
1204
1205 HasUse = mop.isUse();
1206 HasDef = mop.isDef();
1207 SmallVector<unsigned, 2> Ops;
1208 Ops.push_back(i);
1209 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1210 const MachineOperand &MOj = MI->getOperand(j);
1211 if (!MOj.isReg())
1212 continue;
1213 unsigned RegJ = MOj.getReg();
1214 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1215 continue;
1216 if (RegJ == RegI) {
1217 Ops.push_back(j);
1218 if (!MOj.isUndef()) {
1219 HasUse |= MOj.isUse();
1220 HasDef |= MOj.isDef();
1221 }
1222 }
1223 }
1224
1225 // Create a new virtual register for the spill interval.
1226 // Create the new register now so we can map the fold instruction
1227 // to the new register, so that when it is unfolded we get the
1228 // correct answer.
1229 bool CreatedNewVReg = false;
1230 if (NewVReg == 0) {
1231 NewVReg = mri_->createVirtualRegister(rc);
1232 vrm.grow();
1233 CreatedNewVReg = true;
1234 }
1235
1236 if (!TryFold)
1237 CanFold = false;
1238 else {
1239 // Do not fold load / store here if we are splitting. We'll find an
1240 // optimal point to insert a load / store later.
1241 if (!TrySplit) {
1242 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1243 Ops, FoldSS, FoldSlot, NewVReg)) {
1244 // Folding the load/store can completely change the instruction in
1245 // unpredictable ways, rescan it from the beginning.
1246
1247 if (FoldSS) {
1248 // We need to give the new vreg the same stack slot as the
1249 // spilled interval.
1250 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1251 }
1252
1253 HasUse = false;
1254 HasDef = false;
1255 CanFold = false;
1256 if (isNotInMIMap(MI))
1257 break;
1258 goto RestartInstruction;
1259 }
1260 } else {
1261 // We'll try to fold it later if it's profitable.
1262 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1263 }
1264 }
1265
1266 mop.setReg(NewVReg);
1267 if (mop.isImplicit())
1268 rewriteImplicitOps(li, MI, NewVReg, vrm);
1269
1270 // Reuse NewVReg for other reads.
1271 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1272 MachineOperand &mopj = MI->getOperand(Ops[j]);
1273 mopj.setReg(NewVReg);
1274 if (mopj.isImplicit())
1275 rewriteImplicitOps(li, MI, NewVReg, vrm);
1276 }
1277
1278 if (CreatedNewVReg) {
1279 if (DefIsReMat) {
1280 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1281 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1282 // Each valnum may have its own remat id.
1283 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1284 } else {
1285 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1286 }
1287 if (!CanDelete || (HasUse && HasDef)) {
1288 // If this is a two-addr instruction then its use operands are
1289 // rematerializable but its def is not. It should be assigned a
1290 // stack slot.
1291 vrm.assignVirt2StackSlot(NewVReg, Slot);
1292 }
1293 } else {
1294 vrm.assignVirt2StackSlot(NewVReg, Slot);
1295 }
1296 } else if (HasUse && HasDef &&
1297 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1298 // If this interval hasn't been assigned a stack slot (because the
1299 // earlier def is a deleted remat def), do it now.
1300 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1301 vrm.assignVirt2StackSlot(NewVReg, Slot);
1302 }
1303
1304 // Re-matting an instruction with virtual register use. Add the
1305 // register as an implicit use on the use MI.
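// (The operand is built with isDef = false and isImp = true, i.e. an
// implicit use.)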
1306 if (DefIsReMat && ImpUse)
1307 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1308
1309 // Create a new register interval for this spill / remat.
1310 LiveInterval &nI = getOrCreateInterval(NewVReg);
1311 if (CreatedNewVReg) {
1312 NewLIs.push_back(&nI);
1313 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1314 if (TrySplit)
1315 vrm.setIsSplitFromReg(NewVReg, li.reg);
1316 }
1317
1318 if (HasUse) {
1319 if (CreatedNewVReg) {
1320 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1321 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1322 DEBUG(errs() << " +" << LR);
1323 nI.addRange(LR);
1324 } else {
1325 // Extend the split live interval to this def / use.
1326 SlotIndex End = index.getDefIndex();
1327 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1328 nI.getValNumInfo(nI.getNumValNums()-1));
1329 DEBUG(errs() << " +" << LR);
1330 nI.addRange(LR);
1331 }
1332 }
1333 if (HasDef) {
1334 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1335 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1336 DEBUG(errs() << " +" << LR);
1337 nI.addRange(LR);
1338 }
1339
1340 DEBUG({
1341 errs() << "\t\t\t\tAdded new interval: ";
1342 nI.print(errs(), tri_);
1343 errs() << '\n';
1344 });
1345 }
1346 return CanFold;
1347}
1348bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1349 const VNInfo *VNI,
1350 MachineBasicBlock *MBB,
1351 SlotIndex Idx) const {
1352 SlotIndex End = getMBBEndIdx(MBB);
1353 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1354 if (VNI->kills[j].isPHI())
1355 continue;
1356
1357 SlotIndex KillIdx = VNI->kills[j];
1358 if (KillIdx > Idx && KillIdx < End)
1359 return true;
1360 }
1361 return false;
1362}
1363
1364/// RewriteInfo - Keep track of machine instrs that will be rewritten
1365/// during spilling.
1366namespace {
1367 struct RewriteInfo {
1368 SlotIndex Index;
1369 MachineInstr *MI;
1370 bool HasUse;
1371 bool HasDef;
1372 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1373 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1374 };
1375
1376 struct RewriteInfoCompare {
1377 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1378 return LHS.Index < RHS.Index;
1379 }
1380 };
1381}
1382
1383void LiveIntervals::
1384rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1385 LiveInterval::Ranges::const_iterator &I,
1386 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1387 unsigned Slot, int LdSlot,
1388 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1389 VirtRegMap &vrm,
1390 const TargetRegisterClass* rc,
1391 SmallVector<int, 4> &ReMatIds,
1392 const MachineLoopInfo *loopInfo,
1393 BitVector &SpillMBBs,
1394 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1395 BitVector &RestoreMBBs,
1396 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1397 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1398 std::vector<LiveInterval*> &NewLIs) {
1399 bool AllCanFold = true;
1400 unsigned NewVReg = 0;
1401 SlotIndex start = I->start.getBaseIndex();
1402 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
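// Note: 'end' is rounded up to the base index of the instruction that
// follows the last instruction covered by this range, so the index
// comparison below works on whole-instruction granularity.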
1403
1404 // First collect all the defs / uses in this live range that will be rewritten.
1405 // Make sure they are sorted according to instruction index.
1406 std::vector<RewriteInfo> RewriteMIs;
1407 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1408 re = mri_->reg_end(); ri != re; ) {
1409 MachineInstr *MI = &*ri;
1410 MachineOperand &O = ri.getOperand();
1411 ++ri;
1412 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1413 SlotIndex index = getInstructionIndex(MI);
1414 if (index < start || index >= end)
1415 continue;
1416
1417 if (O.isUndef())
1418 // Must be defined by an implicit def. It should not be spilled. Note,
1419 // this is for correctness reasons. e.g.
1420 // 8 %reg1024<def> = IMPLICIT_DEF
1421 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1422 // The live range [12, 14) is not part of the reg1024 live interval since
1423 // it's defined by an implicit def. It will not conflict with the live
1424 // interval of reg1025. Now suppose both registers are spilled; you can
1425 // easily see a situation where both registers are reloaded before
1426 // the INSERT_SUBREG and the two target registers would overlap.
1427 continue;
1428 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1429 }
1430 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1431
1432 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1433 // Now rewrite the defs and uses.
1434 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1435 RewriteInfo &rwi = RewriteMIs[i];
1436 ++i;
1437 SlotIndex index = rwi.Index;
1438 bool MIHasUse = rwi.HasUse;
1439 bool MIHasDef = rwi.HasDef;
1440 MachineInstr *MI = rwi.MI;
1441 // If MI defs and/or uses the same register multiple times, then there
1442 // are multiple entries.
1443 unsigned NumUses = MIHasUse;
1444 while (i != e && RewriteMIs[i].MI == MI) {
1445 assert(RewriteMIs[i].Index == index);
1446 bool isUse = RewriteMIs[i].HasUse;
1447 if (isUse) ++NumUses;
1448 MIHasUse |= isUse;
1449 MIHasDef |= RewriteMIs[i].HasDef;
1450 ++i;
1451 }
1452 MachineBasicBlock *MBB = MI->getParent();
1453
1454 if (ImpUse && MI != ReMatDefMI) {
1455 // Re-matting an instruction with virtual register use. Update the
1456 // register interval's spill weight to HUGE_VALF to prevent it from
1457 // being spilled.
1458 LiveInterval &ImpLi = getInterval(ImpUse);
1459 ImpLi.weight = HUGE_VALF;
1460 }
1461
1462 unsigned MBBId = MBB->getNumber();
1463 unsigned ThisVReg = 0;
1464 if (TrySplit) {
1465 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1466 if (NVI != MBBVRegsMap.end()) {
1467 ThisVReg = NVI->second;
1468 // One common case:
1469 // x = use
1470 // ...
1471 // ...
1472 // def = ...
1473 // = use
1474 // It's better to start a new interval here to avoid artificially
1475 // extending the previous one.
1476 if (MIHasDef && !MIHasUse) {
1477 MBBVRegsMap.erase(MBB->getNumber());
1478 ThisVReg = 0;
1479 }
1480 }
1481 }
1482
1483 bool IsNew = ThisVReg == 0;
1484 if (IsNew) {
1485 // This ends the previous live interval. If all of its defs / uses
1486 // can be folded, give it a low spill weight.
1487 if (NewVReg && TrySplit && AllCanFold) {
1488 LiveInterval &nI = getOrCreateInterval(NewVReg);
1489 nI.weight /= 10.0F;
1490 }
1491 AllCanFold = true;
1492 }
1493 NewVReg = ThisVReg;
1494
1495 bool HasDef = false;
1496 bool HasUse = false;
1497 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1498 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1499 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1500 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1501 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1502 if (!HasDef && !HasUse)
1503 continue;
1504
1505 AllCanFold &= CanFold;
1506
1507 // Update weight of spill interval.
1508 LiveInterval &nI = getOrCreateInterval(NewVReg);
1509 if (!TrySplit) {
1510 // The spill weight is now infinity as it cannot be spilled again.
1511 nI.weight = HUGE_VALF;
1512 continue;
1513 }
1514
1515 // Keep track of the last def and first use in each MBB.
1516 if (HasDef) {
1517 if (MI != ReMatOrigDefMI || !CanDelete) {
1518 bool HasKill = false;
1519 if (!HasUse)
1520 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1521 else {
1522 // If this is a two-address instruction, then this index starts a new VNInfo.
1523 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1524 if (VNI)
1525 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1526 }
1527 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1528 SpillIdxes.find(MBBId);
1529 if (!HasKill) {
1530 if (SII == SpillIdxes.end()) {
1531 std::vector<SRInfo> S;
1532 S.push_back(SRInfo(index, NewVReg, true));
1533 SpillIdxes.insert(std::make_pair(MBBId, S));
1534 } else if (SII->second.back().vreg != NewVReg) {
1535 SII->second.push_back(SRInfo(index, NewVReg, true));
1536 } else if (index > SII->second.back().index) {
1537 // If there is an earlier def and this is a two-address
1538 // instruction, then it's not possible to fold the store (which
1539 // would also fold the load).
1540 SRInfo &Info = SII->second.back();
1541 Info.index = index;
1542 Info.canFold = !HasUse;
1543 }
1544 SpillMBBs.set(MBBId);
1545 } else if (SII != SpillIdxes.end() &&
1546 SII->second.back().vreg == NewVReg &&
1547 index > SII->second.back().index) {
1548 // There is an earlier def that's not killed (must be two-address).
1549 // The spill is no longer needed.
1550 SII->second.pop_back();
1551 if (SII->second.empty()) {
1552 SpillIdxes.erase(MBBId);
1553 SpillMBBs.reset(MBBId);
1554 }
1555 }
1556 }
1557 }
1558
1559 if (HasUse) {
1560 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1561 SpillIdxes.find(MBBId);
1562 if (SII != SpillIdxes.end() &&
1563 SII->second.back().vreg == NewVReg &&
1564 index > SII->second.back().index)
1565 // Use(s) follow the last def; it's not safe to fold the spill.
1566 SII->second.back().canFold = false;
1567 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1568 RestoreIdxes.find(MBBId);
1569 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1570 // If we are splitting live intervals, only fold if it's the first
1571 // use and there isn't another use later in the MBB.
1572 RII->second.back().canFold = false;
1573 else if (IsNew) {
1574 // Only need a reload if there isn't an earlier def / use.
1575 if (RII == RestoreIdxes.end()) {
1576 std::vector<SRInfo> Infos;
1577 Infos.push_back(SRInfo(index, NewVReg, true));
1578 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1579 } else {
1580 RII->second.push_back(SRInfo(index, NewVReg, true));
1581 }
1582 RestoreMBBs.set(MBBId);
1583 }
1584 }
1585
1586 // Update spill weight.
1587 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1588 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1589 }
1590
1591 if (NewVReg && TrySplit && AllCanFold) {
1592 // If all of its defs / uses can be folded, give it a low spill weight.
1593 LiveInterval &nI = getOrCreateInterval(NewVReg);
1594 nI.weight /= 10.0F;
1595 }
1596}
1597
1598bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1599 unsigned vr, BitVector &RestoreMBBs,
1600 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1601 if (!RestoreMBBs[Id])
1602 return false;
1603 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1604 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1605 if (Restores[i].index == index &&
1606 Restores[i].vreg == vr &&
1607 Restores[i].canFold)
1608 return true;
1609 return false;
1610}
1611
1612void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1613 unsigned vr, BitVector &RestoreMBBs,
1614 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1615 if (!RestoreMBBs[Id])
1616 return;
1617 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1618 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1619 if (Restores[i].index == index && Restores[i].vreg)
1620 Restores[i].index = SlotIndex();
1621}
1622
1623/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1624/// spilled and create empty intervals for their uses.
1625void
1626LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1627 const TargetRegisterClass* rc,
1628 std::vector<LiveInterval*> &NewLIs) {
1629 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1630 re = mri_->reg_end(); ri != re; ) {
1631 MachineOperand &O = ri.getOperand();
1632 MachineInstr *MI = &*ri;
1633 ++ri;
1634 if (O.isDef()) {
1635 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1636 "Register def was not rewritten?");
1637 RemoveMachineInstrFromMaps(MI);
1638 vrm.RemoveMachineInstrFromMaps(MI);
1639 MI->eraseFromParent();
1640 } else {
1641 // This must be a use of an implicit_def so it's not part of the live
1642 // interval. Create a new empty live interval for it.
1643 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1644 unsigned NewVReg = mri_->createVirtualRegister(rc);
1645 vrm.grow();
1646 vrm.setIsImplicitlyDefined(NewVReg);
1647 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1648 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1649 MachineOperand &MO = MI->getOperand(i);
1650 if (MO.isReg() && MO.getReg() == li.reg) {
1651 MO.setReg(NewVReg);
1652 MO.setIsUndef();
1653 }
1654 }
1655 }
1656 }
1657}
1658
1659std::vector<LiveInterval*> LiveIntervals::
1660addIntervalsForSpillsFast(const LiveInterval &li,
1661 const MachineLoopInfo *loopInfo,
1662 VirtRegMap &vrm) {
1663 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1664
1665 std::vector<LiveInterval*> added;
1666
1667 assert(li.weight != HUGE_VALF &&
1668 "attempt to spill already spilled interval!");
1669
1670 DEBUG({
1671 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1672 li.dump();
1673 errs() << '\n';
1674 });
1675
1676 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1677
1678 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1679 while (RI != mri_->reg_end()) {
1680 MachineInstr* MI = &*RI;
1681
1682 SmallVector<unsigned, 2> Indices;
1683 bool HasUse = false;
1684 bool HasDef = false;
1685
1686 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1687 MachineOperand& mop = MI->getOperand(i);
1688 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1689
1690 HasUse |= MI->getOperand(i).isUse();
1691 HasDef |= MI->getOperand(i).isDef();
1692
1693 Indices.push_back(i);
1694 }
1695
1696 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1697 Indices, true, slot, li.reg)) {
1698 unsigned NewVReg = mri_->createVirtualRegister(rc);
1699 vrm.grow();
1700 vrm.assignVirt2StackSlot(NewVReg, slot);
1701
1702 // Create a new interval for this spill.
1703 LiveInterval &nI = getOrCreateInterval(NewVReg);
1704
1705 // The spill weight is now infinity as it
1706 // cannot be spilled again.
1707 nI.weight = HUGE_VALF;
1708
1709 // Rewrite register operands to use the new vreg.
1710 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1711 E = Indices.end(); I != E; ++I) {
1712 MI->getOperand(*I).setReg(NewVReg);
1713
1714 if (MI->getOperand(*I).isUse())
1715 MI->getOperand(*I).setIsKill(true);
1716 }
1717
1718 // Fill in the new live interval.
1719 SlotIndex index = getInstructionIndex(MI);
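// Each instruction index is subdivided into load, use, def and store
// slots, so a restore can be modeled as live from the load slot and a
// spill as live until the store slot of the same instruction.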
1720 if (HasUse) {
1721 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1722 nI.getNextValue(SlotIndex(), 0, false,
1723 getVNInfoAllocator()));
1724 DEBUG(errs() << " +" << LR);
1725 nI.addRange(LR);
1726 vrm.addRestorePoint(NewVReg, MI);
1727 }
1728 if (HasDef) {
1729 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1730 nI.getNextValue(SlotIndex(), 0, false,
1731 getVNInfoAllocator()));
1732 DEBUG(errs() << " +" << LR);
1733 nI.addRange(LR);
1734 vrm.addSpillPoint(NewVReg, true, MI);
1735 }
1736
1737 added.push_back(&nI);
1738
1739 DEBUG({
1740 errs() << "\t\t\t\tadded new interval: ";
1741 nI.dump();
1742 errs() << '\n';
1743 });
1744 }
1745
1746
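// The rewrite above may have erased MI or changed operands of li.reg,
// invalidating any saved use-list iterator, so restart the walk over
// li.reg's uses from the beginning.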
1747 RI = mri_->reg_begin(li.reg);
1748 }
1749
1750 return added;
1751}
1752
1753std::vector<LiveInterval*> LiveIntervals::
1754addIntervalsForSpills(const LiveInterval &li,
1755 SmallVectorImpl<LiveInterval*> &SpillIs,
1756 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1757
1758 if (EnableFastSpilling)
1759 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1760
1761 assert(li.weight != HUGE_VALF &&
1762 "attempt to spill already spilled interval!");
1763
1764 DEBUG({
1765 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1766 li.print(errs(), tri_);
1767 errs() << '\n';
1768 });
1769
1770 // Each bit specifies whether a spill is required in the MBB.
1771 BitVector SpillMBBs(mf_->getNumBlockIDs());
1772 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1773 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1774 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1775 DenseMap<unsigned,unsigned> MBBVRegsMap;
1776 std::vector<LiveInterval*> NewLIs;
1777 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1778
1779 unsigned NumValNums = li.getNumValNums();
1780 SmallVector<MachineInstr*, 4> ReMatDefs;
1781 ReMatDefs.resize(NumValNums, NULL);
1782 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1783 ReMatOrigDefs.resize(NumValNums, NULL);
1784 SmallVector<int, 4> ReMatIds;
1785 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1786 BitVector ReMatDelete(NumValNums);
1787 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
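// MAX_STACK_SLOT doubles as a "no stack slot assigned yet" sentinel
// here; a real slot is chosen below only if one turns out to be needed.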
1788
1789 // Spilling a split live interval. It cannot be split any further. It is
1790 // also guaranteed to be a single val# / range interval.
1791 if (vrm.getPreSplitReg(li.reg)) {
1792 vrm.setIsSplitFromReg(li.reg, 0);
1793 // Unset the split kill marker on the last use.
1794 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1795 if (KillIdx != SlotIndex()) {
1796 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1797 assert(KillMI && "Last use disappeared?");
1798 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1799 assert(KillOp != -1 && "Last use disappeared?");
1800 KillMI->getOperand(KillOp).setIsKill(false);
1801 }
1802 vrm.removeKillPoint(li.reg);
1803 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1804 Slot = vrm.getStackSlot(li.reg);
1805 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1806 MachineInstr *ReMatDefMI = DefIsReMat ?
1807 vrm.getReMaterializedMI(li.reg) : NULL;
1808 int LdSlot = 0;
1809 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1810 bool isLoad = isLoadSS ||
1811 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1812 bool IsFirstRange = true;
1813 for (LiveInterval::Ranges::const_iterator
1814 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1815 // If this is a split live interval with multiple ranges, it means there
1816 // are two-address instructions that re-defined the value. Only the
1817 // first def can be rematerialized!
1818 if (IsFirstRange) {
1819 // Note ReMatOrigDefMI has already been deleted.
1820 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1821 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1822 false, vrm, rc, ReMatIds, loopInfo,
1823 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1824 MBBVRegsMap, NewLIs);
1825 } else {
1826 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1827 Slot, 0, false, false, false,
1828 false, vrm, rc, ReMatIds, loopInfo,
1829 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1830 MBBVRegsMap, NewLIs);
1831 }
1832 IsFirstRange = false;
1833 }
1834
1835 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1836 return NewLIs;
1837 }
1838
1839 bool TrySplit = !intervalIsInOneMBB(li);
1840 if (TrySplit)
1841 ++numSplits;
1842 bool NeedStackSlot = false;
1843 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1844 i != e; ++i) {
1845 const VNInfo *VNI = *i;
1846 unsigned VN = VNI->id;
1847 if (VNI->isUnused())
1848 continue; // Dead val#.
1849 // Is the def for the val# rematerializable?
1850 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1851 ? getInstructionFromIndex(VNI->def) : 0;
1852 bool dummy;
1853 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1854 // Remember how to remat the def of this val#.
1855 ReMatOrigDefs[VN] = ReMatDefMI;
1856 // Original def may be modified so we have to make a copy here.
1857 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1858 CloneMIs.push_back(Clone);
1859 ReMatDefs[VN] = Clone;
1860
1861 bool CanDelete = true;
1862 if (VNI->hasPHIKill()) {
1863 // A kill is a phi node, not all of its uses can be rematerialized.
1864 // It must not be deleted.
1865 CanDelete = false;
1866 // Need a stack slot if there is any live range where uses cannot be
1867 // rematerialized.
1868 NeedStackSlot = true;
1869 }
1870 if (CanDelete)
1871 ReMatDelete.set(VN);
1872 } else {
1873 // Need a stack slot if there is any live range where uses cannot be
1874 // rematerialized.
1875 NeedStackSlot = true;
1876 }
1877 }
1878
1879 // One stack slot per live interval.
1880 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1881 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1882 Slot = vrm.assignVirt2StackSlot(li.reg);
1883
1884 // This case only occurs when the prealloc splitter has already assigned
1885 // a stack slot to this vreg.
1886 else
1887 Slot = vrm.getStackSlot(li.reg);
1888 }
1889
1890 // Create new intervals and rewrite defs and uses.
1891 for (LiveInterval::Ranges::const_iterator
1892 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1893 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1894 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1895 bool DefIsReMat = ReMatDefMI != NULL;
1896 bool CanDelete = ReMatDelete[I->valno->id];
1897 int LdSlot = 0;
1898 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1899 bool isLoad = isLoadSS ||
1900 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1901 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1902 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1903 CanDelete, vrm, rc, ReMatIds, loopInfo,
1904 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1905 MBBVRegsMap, NewLIs);
1906 }
1907
1908 // Insert spills / restores if we are splitting.
1909 if (!TrySplit) {
1910 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1911 return NewLIs;
1912 }
1913
1914 SmallPtrSet<LiveInterval*, 4> AddedKill;
1915 SmallVector<unsigned, 2> Ops;
1916 if (NeedStackSlot) {
1917 int Id = SpillMBBs.find_first();
1918 while (Id != -1) {
1919 std::vector<SRInfo> &spills = SpillIdxes[Id];
1920 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1921 SlotIndex index = spills[i].index;
1922 unsigned VReg = spills[i].vreg;
1923 LiveInterval &nI = getOrCreateInterval(VReg);
1924 bool isReMat = vrm.isReMaterialized(VReg);
1925 MachineInstr *MI = getInstructionFromIndex(index);
1926 bool CanFold = false;
1927 bool FoundUse = false;
1928 Ops.clear();
1929 if (spills[i].canFold) {
1930 CanFold = true;
1931 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1932 MachineOperand &MO = MI->getOperand(j);
1933 if (!MO.isReg() || MO.getReg() != VReg)
1934 continue;
1935
1936 Ops.push_back(j);
1937 if (MO.isDef())
1938 continue;
1939 if (isReMat ||
1940 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1941 RestoreMBBs, RestoreIdxes))) {
1942 // MI has two-address uses of the same register. If the use
1943 // isn't the first and only use in the BB, then we can't fold
1944 // it. FIXME: Move this to rewriteInstructionsForSpills.
1945 CanFold = false;
1946 break;
1947 }
1948 FoundUse = true;
1949 }
1950 }
1951 // Fold the store into the def if possible.
1952 bool Folded = false;
1953 if (CanFold && !Ops.empty()) {
1954 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1955 Folded = true;
1956 if (FoundUse) {
1957 // Also folded uses, do not issue a load.
1958 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1959 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1960 }
1961 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1962 }
1963 }
1964
1965 // Otherwise tell the spiller to issue a spill.
1966 if (!Folded) {
1967 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1968 bool isKill = LR->end == index.getStoreIndex();
1969 if (!MI->registerDefIsDead(nI.reg))
1970 // No need to spill a dead def.
1971 vrm.addSpillPoint(VReg, isKill, MI);
1972 if (isKill)
1973 AddedKill.insert(&nI);
1974 }
1975 }
1976 Id = SpillMBBs.find_next(Id);
1977 }
1978 }
1979
1980 int Id = RestoreMBBs.find_first();
1981 while (Id != -1) {
1982 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1983 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1984 SlotIndex index = restores[i].index;
1985 if (index == SlotIndex())
1986 continue;
1987 unsigned VReg = restores[i].vreg;
1988 LiveInterval &nI = getOrCreateInterval(VReg);
1989 bool isReMat = vrm.isReMaterialized(VReg);
1990 MachineInstr *MI = getInstructionFromIndex(index);
1991 bool CanFold = false;
1992 Ops.clear();
1993 if (restores[i].canFold) {
1994 CanFold = true;
1995 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1996 MachineOperand &MO = MI->getOperand(j);
1997 if (!MO.isReg() || MO.getReg() != VReg)
1998 continue;
1999
2000 if (MO.isDef()) {
2001 // If this restore were to be folded, it would have been folded
2002 // already.
2003 CanFold = false;
2004 break;
2005 }
2006 Ops.push_back(j);
2007 }
2008 }
2009
2010 // Fold the load into the use if possible.
2011 bool Folded = false;
2012 if (CanFold && !Ops.empty()) {
2013 if (!isReMat)
2014 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2015 else {
2016 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2017 int LdSlot = 0;
2018 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2019 // If the rematerializable def is a load, also try to fold it.
2020 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2021 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2022 Ops, isLoadSS, LdSlot, VReg);
2023 if (!Folded) {
2024 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2025 if (ImpUse) {
2026 // Re-matting an instruction with virtual register use. Add the
2027 // register as an implicit use on the use MI and update the register
2028 // interval's spill weight to HUGE_VALF to prevent it from being
2029 // spilled.
2030 LiveInterval &ImpLi = getInterval(ImpUse);
2031 ImpLi.weight = HUGE_VALF;
2032 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2033 }
2034 }
2035 }
2036 }
2037 // If folding is not possible / failed, then tell the spiller to issue a
2038 // load / rematerialization for us.
2039 if (Folded)
2040 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
2041 else
2042 vrm.addRestorePoint(VReg, MI);
2043 }
2044 Id = RestoreMBBs.find_next(Id);
2045 }
2046
2047 // Finalize intervals: add kills, finalize spill weights, and filter out
2048 // dead intervals.
2049 std::vector<LiveInterval*> RetNewLIs;
2050 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2051 LiveInterval *LI = NewLIs[i];
2052 if (!LI->empty()) {
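// Divide the accumulated weight by the interval's approximate size so
// that large intervals with few uses are favored for spilling over
// small, heavily used ones.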
2053 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
2054 if (!AddedKill.count(LI)) {
2055 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2056 SlotIndex LastUseIdx = LR->end.getBaseIndex();
2057 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2058 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2059 assert(UseIdx != -1);
2060 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2061 LastUse->getOperand(UseIdx).setIsKill();
2062 vrm.addKillPoint(LI->reg, LastUseIdx);
2063 }
2064 }
2065 RetNewLIs.push_back(LI);
2066 }
2067 }
2068
2069 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2070 return RetNewLIs;
2071}
2072
2073/// hasAllocatableSuperReg - Return true if the specified physical register has
2074/// any super register that's allocatable.
2075bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2076 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2077 if (allocatableRegs_[*AS] && hasInterval(*AS))
2078 return true;
2079 return false;
2080}
2081
2082/// getRepresentativeReg - Find the largest super register of the specified
2083/// physical register.
2084unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2085 // Find the largest super-register, i.e. one with a live interval but no allocatable super-register of its own.
2086 unsigned BestReg = Reg;
2087 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2088 unsigned SuperReg = *AS;
2089 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2090 BestReg = SuperReg;
2091 break;
2092 }
2093 }
2094 return BestReg;
2095}
2096
2097/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2100/// specified interval that conflict with the specified physical register.
2099unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2100 unsigned PhysReg) const {
2101 unsigned NumConflicts = 0;
2102 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2103 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2104 E = mri_->reg_end(); I != E; ++I) {
2105 MachineOperand &O = I.getOperand();
2106 MachineInstr *MI = O.getParent();
2107 SlotIndex Index = getInstructionIndex(MI);
2108 if (pli.liveAt(Index))
2109 ++NumConflicts;
2110 }
2111 return NumConflicts;
2112}
2113
2114/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2115/// around all defs and uses of the specified interval. Return true if it
2116/// was able to cut its interval.
2117bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2118 unsigned PhysReg, VirtRegMap &vrm) {
2119 unsigned SpillReg = getRepresentativeReg(PhysReg);
2120
2121 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2122 // If there are registers which alias PhysReg but are not sub-registers
2123 // of the chosen representative super register, we cannot handle them
2124 // yet, so assert.
2125 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2126 tri_->isSuperRegister(*AS, SpillReg));
2127
2128 bool Cut = false;
2129 SmallVector<unsigned, 4> PRegs;
2130 if (hasInterval(SpillReg))
2131 PRegs.push_back(SpillReg);
2132 else {
2133 SmallSet<unsigned, 4> Added;
2134 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2135 if (Added.insert(*AS) && hasInterval(*AS)) {
2136 PRegs.push_back(*AS);
2137 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2138 Added.insert(*ASS);
2139 }
2140 }
2141
2142 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2143 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2144 E = mri_->reg_end(); I != E; ++I) {
2145 MachineOperand &O = I.getOperand();
2146 MachineInstr *MI = O.getParent();
2147 if (SeenMIs.count(MI))
2148 continue;
2149 SeenMIs.insert(MI);
2150 SlotIndex Index = getInstructionIndex(MI);
2151 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2152 unsigned PReg = PRegs[i];
2153 LiveInterval &pli = getInterval(PReg);
2154 if (!pli.liveAt(Index))
2155 continue;
2156 vrm.addEmergencySpill(PReg, MI);
2157 SlotIndex StartIdx = Index.getLoadIndex();
2158 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
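// Carve the physreg interval out around this instruction: from the
// load slot of this index up to the base index of the following
// instruction, making room for the emergency spill and reload.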
2159 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2160 pli.removeRange(StartIdx, EndIdx);
2161 Cut = true;
2162 } else {
2163 std::string msg;
2164 raw_string_ostream Msg(msg);
2165 Msg << "Ran out of registers during register allocation!";
2166 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2167 Msg << "\nPlease check your inline asm statement for invalid "
2168 << "constraints:\n";
2169 MI->print(Msg, tm_);
2170 }
2171 llvm_report_error(Msg.str());
2172 }
2173 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2174 if (!hasInterval(*AS))
2175 continue;
2176 LiveInterval &spli = getInterval(*AS);
2177 if (spli.liveAt(Index))
2178 spli.removeRange(Index.getLoadIndex(),
2179 Index.getNextIndex().getBaseIndex());
2180 }
2181 }
2182 }
2183 return Cut;
2184}
2185
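/// addLiveRangeToEndOfBlock - Create a new value for reg, defined at
/// startInst, and extend its live range to the end of startInst's basic
/// block. The value is marked as having a PHI kill at the terminator gap.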
2186LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2187 MachineInstr* startInst) {
2188 LiveInterval& Interval = getOrCreateInterval(reg);
2189 VNInfo* VN = Interval.getNextValue(
2190 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2191 startInst, true, getVNInfoAllocator());
2192 VN->setHasPHIKill(true);
2193 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2194 LiveRange LR(
2195 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2196 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2197 Interval.addRange(LR);
2198
2199 return LR;
2200}
2201
701 // Create empty intervals for registers defined by implicit_def's (except
702 // for those implicit_def that define values which are liveout of their
703 // blocks.
704 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
705 unsigned UndefReg = UndefUses[i];
706 (void)getOrCreateInterval(UndefReg);
707 }
708}
709
710LiveInterval* LiveIntervals::createInterval(unsigned reg) {
711 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
712 return new LiveInterval(reg, Weight);
713}
714
715/// dupInterval - Duplicate a live interval. The caller is responsible for
716/// managing the allocated memory.
717LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
718 LiveInterval *NewLI = createInterval(li->reg);
719 NewLI->Copy(*li, mri_, getVNInfoAllocator());
720 return NewLI;
721}
722
723/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
724/// copy field and returns the source register that defines it.
725unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
726 if (!VNI->getCopy())
727 return 0;
728
729 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
730 // If it's extracting out of a physical register, return the sub-register.
731 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
732 if (TargetRegisterInfo::isPhysicalRegister(Reg))
733 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
734 return Reg;
735 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
736 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
737 return VNI->getCopy()->getOperand(2).getReg();
738
739 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
740 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
741 return SrcReg;
742 llvm_unreachable("Unrecognized copy instruction!");
743 return 0;
744}
745
746//===----------------------------------------------------------------------===//
747// Register allocator hooks.
748//
749
750/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
751/// allow one) virtual register operand, then its uses are implicitly using
752/// the register. Returns the virtual register.
753unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
754 MachineInstr *MI) const {
755 unsigned RegOp = 0;
756 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
757 MachineOperand &MO = MI->getOperand(i);
758 if (!MO.isReg() || !MO.isUse())
759 continue;
760 unsigned Reg = MO.getReg();
761 if (Reg == 0 || Reg == li.reg)
762 continue;
763
764 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
765 !allocatableRegs_[Reg])
766 continue;
767 // FIXME: For now, only remat MI with at most one register operand.
768 assert(!RegOp &&
769 "Can't rematerialize instruction with multiple register operand!");
770 RegOp = MO.getReg();
771#ifndef NDEBUG
772 break;
773#endif
774 }
775 return RegOp;
776}
777
778/// isValNoAvailableAt - Return true if the val# of the specified interval
779/// which reaches the given instruction also reaches the specified use index.
780bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
781 SlotIndex UseIdx) const {
782 SlotIndex Index = getInstructionIndex(MI);
783 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
784 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
785 return UI != li.end() && UI->valno == ValNo;
786}
787
788/// isReMaterializable - Returns true if the definition MI of the specified
789/// val# of the specified interval is re-materializable.
790bool LiveIntervals::isReMaterializable(const LiveInterval &li,
791 const VNInfo *ValNo, MachineInstr *MI,
792 SmallVectorImpl<LiveInterval*> &SpillIs,
793 bool &isLoad) {
794 if (DisableReMat)
795 return false;
796
797 if (!tii_->isTriviallyReMaterializable(MI, aa_))
798 return false;
799
800 // Target-specific code can mark an instruction as being rematerializable
801 // if it has one virtual reg use, though it had better be something like
802 // a PIC base register which is likely to be live everywhere.
803 unsigned ImpUse = getReMatImplicitUse(li, MI);
804 if (ImpUse) {
805 const LiveInterval &ImpLi = getInterval(ImpUse);
806 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
807 re = mri_->use_end(); ri != re; ++ri) {
808 MachineInstr *UseMI = &*ri;
809 SlotIndex UseIdx = getInstructionIndex(UseMI);
810 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
811 continue;
812 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
813 return false;
814 }
815
816 // If a register operand of the re-materialized instruction is going to
817 // be spilled next, then it's not legal to re-materialize this instruction.
818 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
819 if (ImpUse == SpillIs[i]->reg)
820 return false;
821 }
822 return true;
823}
824
825/// isReMaterializable - Returns true if the definition MI of the specified
826/// val# of the specified interval is re-materializable.
827bool LiveIntervals::isReMaterializable(const LiveInterval &li,
828 const VNInfo *ValNo, MachineInstr *MI) {
829 SmallVector<LiveInterval*, 4> Dummy1;
830 bool Dummy2;
831 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
832}
833
834/// isReMaterializable - Returns true if every definition of MI of every
835/// val# of the specified interval is re-materializable.
836bool LiveIntervals::isReMaterializable(const LiveInterval &li,
837 SmallVectorImpl<LiveInterval*> &SpillIs,
838 bool &isLoad) {
839 isLoad = false;
840 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
841 i != e; ++i) {
842 const VNInfo *VNI = *i;
843 if (VNI->isUnused())
844 continue; // Dead val#.
845 // Is the def for the val# rematerializable?
846 if (!VNI->isDefAccurate())
847 return false;
848 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
849 bool DefIsLoad = false;
850 if (!ReMatDefMI ||
851 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
852 return false;
853 isLoad |= DefIsLoad;
854 }
855 return true;
856}
857
858/// FilterFoldedOps - Filter out two-address use operands. Return
859/// true if it finds any issue with the operands that ought to prevent
860/// folding.
861static bool FilterFoldedOps(MachineInstr *MI,
862 SmallVector<unsigned, 2> &Ops,
863 unsigned &MRInfo,
864 SmallVector<unsigned, 2> &FoldOps) {
865 MRInfo = 0;
866 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
867 unsigned OpIdx = Ops[i];
868 MachineOperand &MO = MI->getOperand(OpIdx);
869 // FIXME: fold subreg use.
870 if (MO.getSubReg())
871 return true;
872 if (MO.isDef())
873 MRInfo |= (unsigned)VirtRegMap::isMod;
874 else {
875 // Filter out two-address use operand(s).
876 if (MI->isRegTiedToDefOperand(OpIdx)) {
877 MRInfo = VirtRegMap::isModRef;
878 continue;
879 }
880 MRInfo |= (unsigned)VirtRegMap::isRef;
881 }
882 FoldOps.push_back(OpIdx);
883 }
884 return false;
885}
886
887
888/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
889/// slot / to reg or any rematerialized load into ith operand of specified
890/// MI. If it is successul, MI is updated with the newly created MI and
891/// returns true.
892bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
893 VirtRegMap &vrm, MachineInstr *DefMI,
894 SlotIndex InstrIdx,
895 SmallVector<unsigned, 2> &Ops,
896 bool isSS, int Slot, unsigned Reg) {
897 // If it is an implicit def instruction, just delete it.
898 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
899 RemoveMachineInstrFromMaps(MI);
900 vrm.RemoveMachineInstrFromMaps(MI);
901 MI->eraseFromParent();
902 ++numFolds;
903 return true;
904 }
905
906 // Filter the list of operand indexes that are to be folded. Abort if
907 // any operand will prevent folding.
908 unsigned MRInfo = 0;
909 SmallVector<unsigned, 2> FoldOps;
910 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
911 return false;
912
913 // The only time it's safe to fold into a two address instruction is when
914 // it's folding reload and spill from / into a spill stack slot.
915 if (DefMI && (MRInfo & VirtRegMap::isMod))
916 return false;
917
918 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
919 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
920 if (fmi) {
921 // Remember this instruction uses the spill slot.
922 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
923
924 // Attempt to fold the memory reference into the instruction. If
925 // we can do this, we don't need to insert spill code.
926 MachineBasicBlock &MBB = *MI->getParent();
927 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
928 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
929 vrm.transferSpillPts(MI, fmi);
930 vrm.transferRestorePts(MI, fmi);
931 vrm.transferEmergencySpills(MI, fmi);
932 ReplaceMachineInstrInMaps(MI, fmi);
933 MI = MBB.insert(MBB.erase(MI), fmi);
934 ++numFolds;
935 return true;
936 }
937 return false;
938}
939
940/// canFoldMemoryOperand - Returns true if the specified load / store
941/// folding is possible.
942bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
943 SmallVector<unsigned, 2> &Ops,
944 bool ReMat) const {
945 // Filter the list of operand indexes that are to be folded. Abort if
946 // any operand will prevent folding.
947 unsigned MRInfo = 0;
948 SmallVector<unsigned, 2> FoldOps;
949 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
950 return false;
951
952 // It's only legal to remat for a use, not a def.
953 if (ReMat && (MRInfo & VirtRegMap::isMod))
954 return false;
955
956 return tii_->canFoldMemoryOperand(MI, FoldOps);
957}
958
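/// intervalIsInOneMBB - Return true if all of the ranges of the given
/// interval are covered by a single machine basic block.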
959bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
960 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
961
962 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
963
964 if (mbb == 0)
965 return false;
966
967 for (++itr; itr != li.ranges.end(); ++itr) {
968 MachineBasicBlock *mbb2 =
969 indexes_->getMBBCoveringRange(itr->start, itr->end);
970
971 if (mbb2 != mbb)
972 return false;
973 }
974
975 return true;
976}
977
978/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
979/// the interval by to-be rematerialized defs of MI's operands) with the new register.
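///
/// A hedged sketch of the situation (registers hypothetical): another
/// operand of MI is remat'ed by a cloned def that itself reads li.reg.
/// When MI is rewritten to use NewVReg, the cloned def's use of li.reg
/// must be renamed to NewVReg as well, which is what the loop below does.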
980void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
981 MachineInstr *MI, unsigned NewVReg,
982 VirtRegMap &vrm) {
983  // There is an implicit use. That means one of the other operands is
984  // being remat'ed and the remat'ed instruction has li.reg as a
985  // use operand. Make sure we rewrite that as well.
986 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
987 MachineOperand &MO = MI->getOperand(i);
988 if (!MO.isReg())
989 continue;
990 unsigned Reg = MO.getReg();
991 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
992 continue;
993 if (!vrm.isReMaterialized(Reg))
994 continue;
995 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
996 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
997 if (UseMO)
998 UseMO->setReg(NewVReg);
999 }
1000}
1001
1002/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1003/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1004bool LiveIntervals::
1005rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1006 bool TrySplit, SlotIndex index, SlotIndex end,
1007 MachineInstr *MI,
1008 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1009 unsigned Slot, int LdSlot,
1010 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1011 VirtRegMap &vrm,
1012 const TargetRegisterClass* rc,
1013 SmallVector<int, 4> &ReMatIds,
1014 const MachineLoopInfo *loopInfo,
1015 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1016 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1017 std::vector<LiveInterval*> &NewLIs) {
1018 bool CanFold = false;
1019 RestartInstruction:
1020 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1021 MachineOperand& mop = MI->getOperand(i);
1022 if (!mop.isReg())
1023 continue;
1024 unsigned Reg = mop.getReg();
1025 unsigned RegI = Reg;
1026 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1027 continue;
1028 if (Reg != li.reg)
1029 continue;
1030
1031 bool TryFold = !DefIsReMat;
1032 bool FoldSS = true; // Default behavior unless it's a remat.
1033 int FoldSlot = Slot;
1034 if (DefIsReMat) {
1035 // If this is the rematerializable definition MI itself and
1036 // all of its uses are rematerialized, simply delete it.
1037 if (MI == ReMatOrigDefMI && CanDelete) {
1038        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1039               << *MI << '\n');
1040 RemoveMachineInstrFromMaps(MI);
1041 vrm.RemoveMachineInstrFromMaps(MI);
1042 MI->eraseFromParent();
1043 break;
1044 }
1045
1046      // If the def for this use can't be rematerialized, then try folding.
1047      // If the def is rematerializable and it's a load, also try folding.
1048      TryFold = !ReMatDefMI || MI == ReMatOrigDefMI || isLoad;
1049 if (isLoad) {
1050 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1051 FoldSS = isLoadSS;
1052 FoldSlot = LdSlot;
1053 }
1054 }
1055
1056 // Scan all of the operands of this instruction rewriting operands
1057 // to use NewVReg instead of li.reg as appropriate. We do this for
1058 // two reasons:
1059 //
1060 // 1. If the instr reads the same spilled vreg multiple times, we
1061 // want to reuse the NewVReg.
1062 // 2. If the instr is a two-addr instruction, we are required to
1063 // keep the src/dst regs pinned.
1064 //
1065 // Keep track of whether we replace a use and/or def so that we can
1066 // create the spill interval with the appropriate range.
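    //
    // A hedged illustration (opcode choice is just for example): in
    //   %reg1024<def> = ADD32rr %reg1024, %reg1024
    // all three operands refer to the spilled vreg; they are all rewritten
    // to the same NewVReg below, and HasUse and HasDef both become true.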
1067
1068 HasUse = mop.isUse();
1069 HasDef = mop.isDef();
1070 SmallVector<unsigned, 2> Ops;
1071 Ops.push_back(i);
1072 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1073 const MachineOperand &MOj = MI->getOperand(j);
1074 if (!MOj.isReg())
1075 continue;
1076 unsigned RegJ = MOj.getReg();
1077 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1078 continue;
1079 if (RegJ == RegI) {
1080 Ops.push_back(j);
1081 if (!MOj.isUndef()) {
1082 HasUse |= MOj.isUse();
1083 HasDef |= MOj.isDef();
1084 }
1085 }
1086 }
1087
1088    // Create a new virtual register for the spill interval.
1089    // Create the register now so we can map the folded instruction
1090    // to it, so that when the instruction is unfolded we get the
1091    // correct answer.
1092 bool CreatedNewVReg = false;
1093 if (NewVReg == 0) {
1094 NewVReg = mri_->createVirtualRegister(rc);
1095 vrm.grow();
1096 CreatedNewVReg = true;
1097 }
1098
1099 if (!TryFold)
1100 CanFold = false;
1101 else {
1102 // Do not fold load / store here if we are splitting. We'll find an
1103 // optimal point to insert a load / store later.
1104 if (!TrySplit) {
1105 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1106 Ops, FoldSS, FoldSlot, NewVReg)) {
1107          // Folding the load/store can completely change the instruction in
1108          // unpredictable ways; rescan it from the beginning.
1109
1110 if (FoldSS) {
1111 // We need to give the new vreg the same stack slot as the
1112 // spilled interval.
1113 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1114 }
1115
1116 HasUse = false;
1117 HasDef = false;
1118 CanFold = false;
1119 if (isNotInMIMap(MI))
1120 break;
1121 goto RestartInstruction;
1122 }
1123 } else {
1124 // We'll try to fold it later if it's profitable.
1125 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1126 }
1127 }
1128
1129 mop.setReg(NewVReg);
1130 if (mop.isImplicit())
1131 rewriteImplicitOps(li, MI, NewVReg, vrm);
1132
1133 // Reuse NewVReg for other reads.
1134 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1135 MachineOperand &mopj = MI->getOperand(Ops[j]);
1136 mopj.setReg(NewVReg);
1137 if (mopj.isImplicit())
1138 rewriteImplicitOps(li, MI, NewVReg, vrm);
1139 }
1140
1141 if (CreatedNewVReg) {
1142 if (DefIsReMat) {
1143 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1144 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1145 // Each valnum may have its own remat id.
1146 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1147 } else {
1148 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1149 }
1150 if (!CanDelete || (HasUse && HasDef)) {
1151 // If this is a two-addr instruction then its use operands are
1152 // rematerializable but its def is not. It should be assigned a
1153 // stack slot.
1154 vrm.assignVirt2StackSlot(NewVReg, Slot);
1155 }
1156 } else {
1157 vrm.assignVirt2StackSlot(NewVReg, Slot);
1158 }
1159 } else if (HasUse && HasDef &&
1160 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1161      // If this interval hasn't been assigned a stack slot (because the
1162      // earlier def is a deleted remat def), do it now.
1163 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1164 vrm.assignVirt2StackSlot(NewVReg, Slot);
1165 }
1166
1167 // Re-matting an instruction with virtual register use. Add the
1168 // register as an implicit use on the use MI.
1169 if (DefIsReMat && ImpUse)
1170 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1171
1172 // Create a new register interval for this spill / remat.
1173 LiveInterval &nI = getOrCreateInterval(NewVReg);
1174 if (CreatedNewVReg) {
1175 NewLIs.push_back(&nI);
1176 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1177 if (TrySplit)
1178 vrm.setIsSplitFromReg(NewVReg, li.reg);
1179 }
1180
1181 if (HasUse) {
1182 if (CreatedNewVReg) {
1183 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1184 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1185 DEBUG(errs() << " +" << LR);
1186 nI.addRange(LR);
1187 } else {
1188 // Extend the split live interval to this def / use.
1189 SlotIndex End = index.getDefIndex();
1190 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1191 nI.getValNumInfo(nI.getNumValNums()-1));
1192 DEBUG(errs() << " +" << LR);
1193 nI.addRange(LR);
1194 }
1195 }
1196 if (HasDef) {
1197 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1198 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1199 DEBUG(errs() << " +" << LR);
1200 nI.addRange(LR);
1201 }
1202
1203 DEBUG({
1204 errs() << "\t\t\t\tAdded new interval: ";
1205 nI.print(errs(), tri_);
1206 errs() << '\n';
1207 });
1208 }
1209 return CanFold;
1210}
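
/// anyKillInMBBAfterIdx - Return true if the given val# of li has a non-PHI
/// kill inside MBB after the given index.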
1211bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1212 const VNInfo *VNI,
1213 MachineBasicBlock *MBB,
1214 SlotIndex Idx) const {
1215 SlotIndex End = getMBBEndIdx(MBB);
1216 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1217 if (VNI->kills[j].isPHI())
1218 continue;
1219
1220 SlotIndex KillIdx = VNI->kills[j];
1221 if (KillIdx > Idx && KillIdx < End)
1222 return true;
1223 }
1224 return false;
1225}
1226
1227/// RewriteInfo - Keep track of machine instrs that will be rewritten
1228/// during spilling.
1229namespace {
1230 struct RewriteInfo {
1231 SlotIndex Index;
1232 MachineInstr *MI;
1233 bool HasUse;
1234 bool HasDef;
1235 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1236 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1237 };
1238
1239 struct RewriteInfoCompare {
1240 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1241 return LHS.Index < RHS.Index;
1242 }
1243 };
1244}
1245
1246void LiveIntervals::
1247rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1248 LiveInterval::Ranges::const_iterator &I,
1249 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1250 unsigned Slot, int LdSlot,
1251 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1252 VirtRegMap &vrm,
1253 const TargetRegisterClass* rc,
1254 SmallVector<int, 4> &ReMatIds,
1255 const MachineLoopInfo *loopInfo,
1256 BitVector &SpillMBBs,
1257 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1258 BitVector &RestoreMBBs,
1259 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1260 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1261 std::vector<LiveInterval*> &NewLIs) {
1262 bool AllCanFold = true;
1263 unsigned NewVReg = 0;
1264 SlotIndex start = I->start.getBaseIndex();
1265 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1266
1267  // First collect all the defs / uses in this live range that will be rewritten.
1268  // Make sure they are sorted according to instruction index.
1269 std::vector<RewriteInfo> RewriteMIs;
1270 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1271 re = mri_->reg_end(); ri != re; ) {
1272 MachineInstr *MI = &*ri;
1273 MachineOperand &O = ri.getOperand();
1274 ++ri;
1275 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1276 SlotIndex index = getInstructionIndex(MI);
1277 if (index < start || index >= end)
1278 continue;
1279
1280 if (O.isUndef())
1281      // Must be defined by an implicit def. It should not be spilled. Note,
1282      // this is for correctness reasons. e.g.
1283      // 8   %reg1024<def> = IMPLICIT_DEF
1284      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1285      // The live range [12, 14) is not part of the r1024 live interval since
1286      // it's defined by an implicit def. It will not conflict with the live
1287      // interval of r1025. Now suppose both registers are spilled; you can
1288      // easily see a situation where both registers are reloaded before
1289      // the INSERT_SUBREG and end up in overlapping target registers.
1290 continue;
1291 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1292 }
1293 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1294
1295 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1296 // Now rewrite the defs and uses.
1297 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1298 RewriteInfo &rwi = RewriteMIs[i];
1299 ++i;
1300 SlotIndex index = rwi.Index;
1301 bool MIHasUse = rwi.HasUse;
1302 bool MIHasDef = rwi.HasDef;
1303 MachineInstr *MI = rwi.MI;
1304    // If MI defs and/or uses the same register multiple times, then there
1305    // are multiple entries.
1306 unsigned NumUses = MIHasUse;
1307 while (i != e && RewriteMIs[i].MI == MI) {
1308 assert(RewriteMIs[i].Index == index);
1309 bool isUse = RewriteMIs[i].HasUse;
1310 if (isUse) ++NumUses;
1311 MIHasUse |= isUse;
1312 MIHasDef |= RewriteMIs[i].HasDef;
1313 ++i;
1314 }
1315 MachineBasicBlock *MBB = MI->getParent();
1316
1317 if (ImpUse && MI != ReMatDefMI) {
1318 // Re-matting an instruction with virtual register use. Update the
1319 // register interval's spill weight to HUGE_VALF to prevent it from
1320 // being spilled.
1321 LiveInterval &ImpLi = getInterval(ImpUse);
1322 ImpLi.weight = HUGE_VALF;
1323 }
1324
1325 unsigned MBBId = MBB->getNumber();
1326 unsigned ThisVReg = 0;
1327 if (TrySplit) {
1328 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1329 if (NVI != MBBVRegsMap.end()) {
1330 ThisVReg = NVI->second;
1331 // One common case:
1332 // x = use
1333 // ...
1334 // ...
1335 // def = ...
1336 // = use
1337        // It's better to start a new interval here to avoid artificially
1338        // extending the previous one.
1339 if (MIHasDef && !MIHasUse) {
1340 MBBVRegsMap.erase(MBB->getNumber());
1341 ThisVReg = 0;
1342 }
1343 }
1344 }
1345
1346 bool IsNew = ThisVReg == 0;
1347 if (IsNew) {
1348      // This ends the previous live interval. If all of its defs / uses
1349      // can be folded, give it a low spill weight.
1350 if (NewVReg && TrySplit && AllCanFold) {
1351 LiveInterval &nI = getOrCreateInterval(NewVReg);
1352 nI.weight /= 10.0F;
1353 }
1354 AllCanFold = true;
1355 }
1356 NewVReg = ThisVReg;
1357
1358 bool HasDef = false;
1359 bool HasUse = false;
1360 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1361 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1362 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1363 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1364 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1365 if (!HasDef && !HasUse)
1366 continue;
1367
1368 AllCanFold &= CanFold;
1369
1370 // Update weight of spill interval.
1371 LiveInterval &nI = getOrCreateInterval(NewVReg);
1372 if (!TrySplit) {
1373 // The spill weight is now infinity as it cannot be spilled again.
1374 nI.weight = HUGE_VALF;
1375 continue;
1376 }
1377
1378 // Keep track of the last def and first use in each MBB.
1379 if (HasDef) {
1380 if (MI != ReMatOrigDefMI || !CanDelete) {
1381 bool HasKill = false;
1382 if (!HasUse)
1383 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1384 else {
1385        // If this is two-address code, then this index starts a new VNInfo.
1386 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1387 if (VNI)
1388 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1389 }
1390 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1391 SpillIdxes.find(MBBId);
1392 if (!HasKill) {
1393 if (SII == SpillIdxes.end()) {
1394 std::vector<SRInfo> S;
1395 S.push_back(SRInfo(index, NewVReg, true));
1396 SpillIdxes.insert(std::make_pair(MBBId, S));
1397 } else if (SII->second.back().vreg != NewVReg) {
1398 SII->second.push_back(SRInfo(index, NewVReg, true));
1399 } else if (index > SII->second.back().index) {
1400 // If there is an earlier def and this is a two-address
1401 // instruction, then it's not possible to fold the store (which
1402 // would also fold the load).
1403 SRInfo &Info = SII->second.back();
1404 Info.index = index;
1405 Info.canFold = !HasUse;
1406 }
1407 SpillMBBs.set(MBBId);
1408 } else if (SII != SpillIdxes.end() &&
1409 SII->second.back().vreg == NewVReg &&
1410 index > SII->second.back().index) {
1411 // There is an earlier def that's not killed (must be two-address).
1412 // The spill is no longer needed.
1413 SII->second.pop_back();
1414 if (SII->second.empty()) {
1415 SpillIdxes.erase(MBBId);
1416 SpillMBBs.reset(MBBId);
1417 }
1418 }
1419 }
1420 }
1421
1422 if (HasUse) {
1423 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1424 SpillIdxes.find(MBBId);
1425 if (SII != SpillIdxes.end() &&
1426 SII->second.back().vreg == NewVReg &&
1427 index > SII->second.back().index)
1428        // There are use(s) following the last def; it's not safe to fold the spill.
1429 SII->second.back().canFold = false;
1430 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1431 RestoreIdxes.find(MBBId);
1432 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1433 // If we are splitting live intervals, only fold if it's the first
1434 // use and there isn't another use later in the MBB.
1435 RII->second.back().canFold = false;
1436 else if (IsNew) {
1437 // Only need a reload if there isn't an earlier def / use.
1438 if (RII == RestoreIdxes.end()) {
1439 std::vector<SRInfo> Infos;
1440 Infos.push_back(SRInfo(index, NewVReg, true));
1441 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1442 } else {
1443 RII->second.push_back(SRInfo(index, NewVReg, true));
1444 }
1445 RestoreMBBs.set(MBBId);
1446 }
1447 }
1448
1449 // Update spill weight.
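    // (getSpillWeight grows steeply with loop depth -- in this revision it
    // is roughly (HasDef + HasUse) * 10^loopDepth -- so defs / uses inside
    // deep loops dominate an interval's spill cost. Stated here for context;
    // see the helper for the authoritative formula.)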
1450 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1451 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1452 }
1453
1454 if (NewVReg && TrySplit && AllCanFold) {
1455 // If all of its def / use can be folded, give it a low spill weight.
1456 LiveInterval &nI = getOrCreateInterval(NewVReg);
1457 nI.weight /= 10.0F;
1458 }
1459}
1460
1461bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1462 unsigned vr, BitVector &RestoreMBBs,
1463 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1464 if (!RestoreMBBs[Id])
1465 return false;
1466 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1467 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1468 if (Restores[i].index == index &&
1469 Restores[i].vreg == vr &&
1470 Restores[i].canFold)
1471 return true;
1472 return false;
1473}
1474
1475void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1476 unsigned vr, BitVector &RestoreMBBs,
1477 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1478 if (!RestoreMBBs[Id])
1479 return;
1480 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1481 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1482 if (Restores[i].index == index && Restores[i].vreg)
1483 Restores[i].index = SlotIndex();
1484}
1485
1486/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1487/// spilled and create empty intervals for their uses.
1488void
1489LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1490 const TargetRegisterClass* rc,
1491 std::vector<LiveInterval*> &NewLIs) {
1492 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1493 re = mri_->reg_end(); ri != re; ) {
1494 MachineOperand &O = ri.getOperand();
1495 MachineInstr *MI = &*ri;
1496 ++ri;
1497 if (O.isDef()) {
1498 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1499 "Register def was not rewritten?");
1500 RemoveMachineInstrFromMaps(MI);
1501 vrm.RemoveMachineInstrFromMaps(MI);
1502 MI->eraseFromParent();
1503 } else {
1504      // This must be a use of an implicit_def so it's not part of the live
1505 // interval. Create a new empty live interval for it.
1506 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1507 unsigned NewVReg = mri_->createVirtualRegister(rc);
1508 vrm.grow();
1509 vrm.setIsImplicitlyDefined(NewVReg);
1510 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1511 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1512 MachineOperand &MO = MI->getOperand(i);
1513 if (MO.isReg() && MO.getReg() == li.reg) {
1514 MO.setReg(NewVReg);
1515 MO.setIsUndef();
1516 }
1517 }
1518 }
1519 }
1520}
1521
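/// addIntervalsForSpillsFast - A simpler spilling path: assign li a stack
/// slot, then for each instruction touching li.reg either fold the stack
/// slot into it or rewrite it to use a fresh vreg, with a restore point
/// before uses and a spill point after defs. No splitting or
/// rematerialization is attempted.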
1522std::vector<LiveInterval*> LiveIntervals::
1523addIntervalsForSpillsFast(const LiveInterval &li,
1524 const MachineLoopInfo *loopInfo,
1525 VirtRegMap &vrm) {
1526 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1527
1528 std::vector<LiveInterval*> added;
1529
1530 assert(li.weight != HUGE_VALF &&
1531 "attempt to spill already spilled interval!");
1532
1533 DEBUG({
1534 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1535 li.dump();
1536 errs() << '\n';
1537 });
1538
1539 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1540
1541 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1542 while (RI != mri_->reg_end()) {
1543 MachineInstr* MI = &*RI;
1544
1545 SmallVector<unsigned, 2> Indices;
1546 bool HasUse = false;
1547 bool HasDef = false;
1548
1549 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1550 MachineOperand& mop = MI->getOperand(i);
1551 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1552
1553 HasUse |= MI->getOperand(i).isUse();
1554 HasDef |= MI->getOperand(i).isDef();
1555
1556 Indices.push_back(i);
1557 }
1558
1559 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1560 Indices, true, slot, li.reg)) {
1561 unsigned NewVReg = mri_->createVirtualRegister(rc);
1562 vrm.grow();
1563 vrm.assignVirt2StackSlot(NewVReg, slot);
1564
1565      // Create a new live interval for this spill.
1566      LiveInterval &nI = getOrCreateInterval(NewVReg);
1567
1568      // The spill weight is now infinity as it
1569      // cannot be spilled again.
1570      nI.weight = HUGE_VALF;
1571
1572 // Rewrite register operands to use the new vreg.
1573 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1574 E = Indices.end(); I != E; ++I) {
1575 MI->getOperand(*I).setReg(NewVReg);
1576
1577 if (MI->getOperand(*I).isUse())
1578 MI->getOperand(*I).setIsKill(true);
1579 }
1580
1581 // Fill in the new live interval.
1582 SlotIndex index = getInstructionIndex(MI);
1583 if (HasUse) {
1584 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1585 nI.getNextValue(SlotIndex(), 0, false,
1586 getVNInfoAllocator()));
1587 DEBUG(errs() << " +" << LR);
1588 nI.addRange(LR);
1589 vrm.addRestorePoint(NewVReg, MI);
1590 }
1591 if (HasDef) {
1592 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1593 nI.getNextValue(SlotIndex(), 0, false,
1594 getVNInfoAllocator()));
1595 DEBUG(errs() << " +" << LR);
1596 nI.addRange(LR);
1597 vrm.addSpillPoint(NewVReg, true, MI);
1598 }
1599
1600 added.push_back(&nI);
1601
1602 DEBUG({
1603 errs() << "\t\t\t\tadded new interval: ";
1604 nI.dump();
1605 errs() << '\n';
1606 });
1607 }
1608
1609
1610 RI = mri_->reg_begin(li.reg);
1611 }
1612
1613 return added;
1614}
1615
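/// addIntervalsForSpills - Spill li, rewriting its uses and defs to fresh
/// intervals and returning them. Rematerializes defs where legal; when li
/// spans multiple blocks it is split instead, with per-MBB spill / restore
/// points collected and folded where profitable.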
1616std::vector<LiveInterval*> LiveIntervals::
1617addIntervalsForSpills(const LiveInterval &li,
1618 SmallVectorImpl<LiveInterval*> &SpillIs,
1619 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1620
1621 if (EnableFastSpilling)
1622 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1623
1624 assert(li.weight != HUGE_VALF &&
1625 "attempt to spill already spilled interval!");
1626
1627 DEBUG({
1628 errs() << "\t\t\t\tadding intervals for spills for interval: ";
1629 li.print(errs(), tri_);
1630 errs() << '\n';
1631 });
1632
1633  // Each bit specifies whether a spill is required in the MBB.
1634 BitVector SpillMBBs(mf_->getNumBlockIDs());
1635 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1636 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1637 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1638 DenseMap<unsigned,unsigned> MBBVRegsMap;
1639 std::vector<LiveInterval*> NewLIs;
1640 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1641
1642 unsigned NumValNums = li.getNumValNums();
1643 SmallVector<MachineInstr*, 4> ReMatDefs;
1644 ReMatDefs.resize(NumValNums, NULL);
1645 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1646 ReMatOrigDefs.resize(NumValNums, NULL);
1647 SmallVector<int, 4> ReMatIds;
1648 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1649 BitVector ReMatDelete(NumValNums);
1650 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1651
1652  // Spilling a split live interval. It cannot be split any further. Also,
1653  // it's guaranteed to be a single val# / range interval.
1654 if (vrm.getPreSplitReg(li.reg)) {
1655 vrm.setIsSplitFromReg(li.reg, 0);
1656 // Unset the split kill marker on the last use.
1657 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1658 if (KillIdx != SlotIndex()) {
1659 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1660 assert(KillMI && "Last use disappeared?");
1661 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1662 assert(KillOp != -1 && "Last use disappeared?");
1663 KillMI->getOperand(KillOp).setIsKill(false);
1664 }
1665 vrm.removeKillPoint(li.reg);
1666 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1667 Slot = vrm.getStackSlot(li.reg);
1668 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1669 MachineInstr *ReMatDefMI = DefIsReMat ?
1670 vrm.getReMaterializedMI(li.reg) : NULL;
1671 int LdSlot = 0;
1672 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1673 bool isLoad = isLoadSS ||
1674 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1675 bool IsFirstRange = true;
1676 for (LiveInterval::Ranges::const_iterator
1677 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1678 // If this is a split live interval with multiple ranges, it means there
1679 // are two-address instructions that re-defined the value. Only the
1680 // first def can be rematerialized!
1681 if (IsFirstRange) {
1682 // Note ReMatOrigDefMI has already been deleted.
1683 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1684 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1685 false, vrm, rc, ReMatIds, loopInfo,
1686 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1687 MBBVRegsMap, NewLIs);
1688 } else {
1689        rewriteInstructionsForSpills(li, false, I, NULL, NULL,
1690 Slot, 0, false, false, false,
1691 false, vrm, rc, ReMatIds, loopInfo,
1692 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1693 MBBVRegsMap, NewLIs);
1694 }
1695 IsFirstRange = false;
1696 }
1697
1698 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1699 return NewLIs;
1700 }
1701
1702 bool TrySplit = !intervalIsInOneMBB(li);
1703 if (TrySplit)
1704 ++numSplits;
1705 bool NeedStackSlot = false;
1706 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1707 i != e; ++i) {
1708 const VNInfo *VNI = *i;
1709 unsigned VN = VNI->id;
1710 if (VNI->isUnused())
1711 continue; // Dead val#.
1712 // Is the def for the val# rematerializable?
1713 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1714 ? getInstructionFromIndex(VNI->def) : 0;
1715 bool dummy;
1716 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1717 // Remember how to remat the def of this val#.
1718 ReMatOrigDefs[VN] = ReMatDefMI;
1719 // Original def may be modified so we have to make a copy here.
1720 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1721 CloneMIs.push_back(Clone);
1722 ReMatDefs[VN] = Clone;
1723
1724 bool CanDelete = true;
1725 if (VNI->hasPHIKill()) {
1726        // A kill is a phi node, so not all of its uses can be
1727        // rematerialized. It must not be deleted.
1728 CanDelete = false;
1729 // Need a stack slot if there is any live range where uses cannot be
1730 // rematerialized.
1731 NeedStackSlot = true;
1732 }
1733 if (CanDelete)
1734 ReMatDelete.set(VN);
1735 } else {
1736 // Need a stack slot if there is any live range where uses cannot be
1737 // rematerialized.
1738 NeedStackSlot = true;
1739 }
1740 }
1741
1742 // One stack slot per live interval.
1743 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1744 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1745 Slot = vrm.assignVirt2StackSlot(li.reg);
1746
1747 // This case only occurs when the prealloc splitter has already assigned
1748 // a stack slot to this vreg.
1749 else
1750 Slot = vrm.getStackSlot(li.reg);
1751 }
1752
1753 // Create new intervals and rewrite defs and uses.
1754 for (LiveInterval::Ranges::const_iterator
1755 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1756 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1757 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1758 bool DefIsReMat = ReMatDefMI != NULL;
1759 bool CanDelete = ReMatDelete[I->valno->id];
1760 int LdSlot = 0;
1761 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1762 bool isLoad = isLoadSS ||
1763 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1764 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1765 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1766 CanDelete, vrm, rc, ReMatIds, loopInfo,
1767 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1768 MBBVRegsMap, NewLIs);
1769 }
1770
1771 // Insert spills / restores if we are splitting.
1772 if (!TrySplit) {
1773 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1774 return NewLIs;
1775 }
1776
1777 SmallPtrSet<LiveInterval*, 4> AddedKill;
1778 SmallVector<unsigned, 2> Ops;
1779 if (NeedStackSlot) {
1780 int Id = SpillMBBs.find_first();
1781 while (Id != -1) {
1782 std::vector<SRInfo> &spills = SpillIdxes[Id];
1783 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1784 SlotIndex index = spills[i].index;
1785 unsigned VReg = spills[i].vreg;
1786 LiveInterval &nI = getOrCreateInterval(VReg);
1787 bool isReMat = vrm.isReMaterialized(VReg);
1788 MachineInstr *MI = getInstructionFromIndex(index);
1789 bool CanFold = false;
1790 bool FoundUse = false;
1791 Ops.clear();
1792 if (spills[i].canFold) {
1793 CanFold = true;
1794 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1795 MachineOperand &MO = MI->getOperand(j);
1796 if (!MO.isReg() || MO.getReg() != VReg)
1797 continue;
1798
1799 Ops.push_back(j);
1800 if (MO.isDef())
1801 continue;
1802 if (isReMat ||
1803 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1804 RestoreMBBs, RestoreIdxes))) {
1805 // MI has two-address uses of the same register. If the use
1806 // isn't the first and only use in the BB, then we can't fold
1807 // it. FIXME: Move this to rewriteInstructionsForSpills.
1808 CanFold = false;
1809 break;
1810 }
1811 FoundUse = true;
1812 }
1813 }
1814 // Fold the store into the def if possible.
1815 bool Folded = false;
1816 if (CanFold && !Ops.empty()) {
1817 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1818 Folded = true;
1819 if (FoundUse) {
1820            // We also folded the uses; do not issue a load.
1821 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1822 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1823 }
1824 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1825 }
1826 }
1827
1828 // Otherwise tell the spiller to issue a spill.
1829 if (!Folded) {
1830 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1831 bool isKill = LR->end == index.getStoreIndex();
1832 if (!MI->registerDefIsDead(nI.reg))
1833 // No need to spill a dead def.
1834 vrm.addSpillPoint(VReg, isKill, MI);
1835 if (isKill)
1836 AddedKill.insert(&nI);
1837 }
1838 }
1839 Id = SpillMBBs.find_next(Id);
1840 }
1841 }
1842
1843 int Id = RestoreMBBs.find_first();
1844 while (Id != -1) {
1845 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1846 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1847 SlotIndex index = restores[i].index;
1848 if (index == SlotIndex())
1849 continue;
1850 unsigned VReg = restores[i].vreg;
1851 LiveInterval &nI = getOrCreateInterval(VReg);
1852 bool isReMat = vrm.isReMaterialized(VReg);
1853 MachineInstr *MI = getInstructionFromIndex(index);
1854 bool CanFold = false;
1855 Ops.clear();
1856 if (restores[i].canFold) {
1857 CanFold = true;
1858 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1859 MachineOperand &MO = MI->getOperand(j);
1860 if (!MO.isReg() || MO.getReg() != VReg)
1861 continue;
1862
1863 if (MO.isDef()) {
1864 // If this restore were to be folded, it would have been folded
1865 // already.
1866 CanFold = false;
1867 break;
1868 }
1869 Ops.push_back(j);
1870 }
1871 }
1872
1873 // Fold the load into the use if possible.
1874 bool Folded = false;
1875 if (CanFold && !Ops.empty()) {
1876 if (!isReMat)
1877 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1878 else {
1879 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1880 int LdSlot = 0;
1881 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1882 // If the rematerializable def is a load, also try to fold it.
1883 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1884 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1885 Ops, isLoadSS, LdSlot, VReg);
1886 if (!Folded) {
1887 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1888 if (ImpUse) {
1889 // Re-matting an instruction with virtual register use. Add the
1890 // register as an implicit use on the use MI and update the register
1891 // interval's spill weight to HUGE_VALF to prevent it from being
1892 // spilled.
1893 LiveInterval &ImpLi = getInterval(ImpUse);
1894 ImpLi.weight = HUGE_VALF;
1895 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1896 }
1897 }
1898 }
1899 }
1900 // If folding is not possible / failed, then tell the spiller to issue a
1901 // load / rematerialization for us.
1902 if (Folded)
1903 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1904 else
1905 vrm.addRestorePoint(VReg, MI);
1906 }
1907 Id = RestoreMBBs.find_next(Id);
1908 }
1909
1910 // Finalize intervals: add kills, finalize spill weights, and filter out
1911 // dead intervals.
1912 std::vector<LiveInterval*> RetNewLIs;
1913 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1914 LiveInterval *LI = NewLIs[i];
1915 if (!LI->empty()) {
1916 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1917 if (!AddedKill.count(LI)) {
1918 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1919 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1920 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1921 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1922 assert(UseIdx != -1);
1923 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1924 LastUse->getOperand(UseIdx).setIsKill();
1925 vrm.addKillPoint(LI->reg, LastUseIdx);
1926 }
1927 }
1928 RetNewLIs.push_back(LI);
1929 }
1930 }
1931
1932 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1933 return RetNewLIs;
1934}
1935
1936/// hasAllocatableSuperReg - Return true if the specified physical register has
1937/// any super register that's allocatable.
1938bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1939 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1940 if (allocatableRegs_[*AS] && hasInterval(*AS))
1941 return true;
1942 return false;
1943}
1944
1945/// getRepresentativeReg - Find the largest super register of the specified
1946/// physical register.
1947unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1948 // Find the largest super-register that is allocatable.
1949 unsigned BestReg = Reg;
1950 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1951 unsigned SuperReg = *AS;
1952 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1953 BestReg = SuperReg;
1954 break;
1955 }
1956 }
1957 return BestReg;
1958}
1959
1960/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1961/// specified interval that conflict with the specified physical register.
1962unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1963 unsigned PhysReg) const {
1964 unsigned NumConflicts = 0;
1965 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1966 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1967 E = mri_->reg_end(); I != E; ++I) {
1968 MachineOperand &O = I.getOperand();
1969 MachineInstr *MI = O.getParent();
1970 SlotIndex Index = getInstructionIndex(MI);
1971 if (pli.liveAt(Index))
1972 ++NumConflicts;
1973 }
1974 return NumConflicts;
1975}
1976
1977/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1978/// around all defs and uses of the specified interval. Return true if it
1979/// was able to cut its interval.
1980bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1981 unsigned PhysReg, VirtRegMap &vrm) {
1982 unsigned SpillReg = getRepresentativeReg(PhysReg);
1983
1984 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1985    // If there are registers which alias PhysReg but are not sub-registers
1986    // of the chosen representative super-register, assert, since we can't
1987    // handle them yet.
1988 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1989 tri_->isSuperRegister(*AS, SpillReg));
1990
1991 bool Cut = false;
1992 SmallVector<unsigned, 4> PRegs;
1993 if (hasInterval(SpillReg))
1994 PRegs.push_back(SpillReg);
1995 else {
1996 SmallSet<unsigned, 4> Added;
1997 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1998 if (Added.insert(*AS) && hasInterval(*AS)) {
1999 PRegs.push_back(*AS);
2000 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2001 Added.insert(*ASS);
2002 }
2003 }
2004
2005 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2006 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2007 E = mri_->reg_end(); I != E; ++I) {
2008 MachineOperand &O = I.getOperand();
2009 MachineInstr *MI = O.getParent();
2010 if (SeenMIs.count(MI))
2011 continue;
2012 SeenMIs.insert(MI);
2013 SlotIndex Index = getInstructionIndex(MI);
2014 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2015 unsigned PReg = PRegs[i];
2016 LiveInterval &pli = getInterval(PReg);
2017 if (!pli.liveAt(Index))
2018 continue;
2019 vrm.addEmergencySpill(PReg, MI);
2020 SlotIndex StartIdx = Index.getLoadIndex();
2021 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2022 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2023 pli.removeRange(StartIdx, EndIdx);
2024 Cut = true;
2025 } else {
2026 std::string msg;
2027 raw_string_ostream Msg(msg);
2028 Msg << "Ran out of registers during register allocation!";
2029 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2030 Msg << "\nPlease check your inline asm statement for invalid "
2031 << "constraints:\n";
2032 MI->print(Msg, tm_);
2033 }
2034 llvm_report_error(Msg.str());
2035 }
2036 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2037 if (!hasInterval(*AS))
2038 continue;
2039 LiveInterval &spli = getInterval(*AS);
2040 if (spli.liveAt(Index))
2041 spli.removeRange(Index.getLoadIndex(),
2042 Index.getNextIndex().getBaseIndex());
2043 }
2044 }
2045 }
2046 return Cut;
2047}
2048
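/// addLiveRangeToEndOfBlock - Extend the live interval of the given register
/// with a new range from startInst's def slot to the end of its basic block,
/// marking the new value as having a PHI kill.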
2049LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2050 MachineInstr* startInst) {
2051 LiveInterval& Interval = getOrCreateInterval(reg);
2052 VNInfo* VN = Interval.getNextValue(
2053 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2054 startInst, true, getVNInfoAllocator());
2055 VN->setHasPHIKill(true);
2056 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2057 LiveRange LR(
2058 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2059 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
2060 Interval.addRange(LR);
2061
2062 return LR;
2063}
2064