RegAllocGreedy.cpp revision 239462
//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRegMatrix.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;
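  // Note: the queue holds (priority, ~reg) pairs, and std::priority_queue
  // pops the lexicographically largest pair first. Storing the complemented
  // register number means that among equal priorities, lower-numbered (older)
  // virtual registers are dequeued first. For example, if vreg5 and vreg9 are
  // pushed with the same priority, ~vreg5 > ~vreg9, so vreg5 pops first.
  // See enqueue() and dequeue() below.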
  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
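// When a candidate becomes the incumbent, its cost is recorded scaled by
// Hysteresis (e.g. BestCost = Hysteresis * Cost in tryRegionSplit), so a
// later candidate must be roughly 2% better before it displaces a decision
// that has already been made. This damps oscillation between near-equal
// choices caused by float rounding.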

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // Reset the original to RS_Assign and start the clone out in the same stage
  // as its parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (priority, ~reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
486/// 487/// Cascade numbers are used to prevent infinite loops if this function is a 488/// cyclic relation. 489/// 490/// @param A The live range to be assigned. 491/// @param IsHint True when A is about to be assigned to its preferred 492/// register. 493/// @param B The live range to be evicted. 494/// @param BreaksHint True when B is already assigned to its preferred register. 495bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint, 496 LiveInterval &B, bool BreaksHint) { 497 bool CanSplit = getStage(B) < RS_Spill; 498 499 // Be fairly aggressive about following hints as long as the evictee can be 500 // split. 501 if (CanSplit && IsHint && !BreaksHint) 502 return true; 503 504 return A.weight > B.weight; 505} 506 507/// canEvictInterference - Return true if all interferences between VirtReg and 508/// PhysReg can be evicted. When OnlyCheap is set, don't do anything 509/// 510/// @param VirtReg Live range that is about to be assigned. 511/// @param PhysReg Desired register for assignment. 512/// @prarm IsHint True when PhysReg is VirtReg's preferred register. 513/// @param MaxCost Only look for cheaper candidates and update with new cost 514/// when returning true. 515/// @returns True when interference can be evicted cheaper than MaxCost. 516bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg, 517 bool IsHint, EvictionCost &MaxCost) { 518 // It is only possible to evict virtual register interference. 519 if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) 520 return false; 521 522 // Find VirtReg's cascade number. This will be unassigned if VirtReg was never 523 // involved in an eviction before. If a cascade number was assigned, deny 524 // evicting anything with the same or a newer cascade number. This prevents 525 // infinite eviction loops. 526 // 527 // This works out so a register without a cascade number is allowed to evict 528 // anything, and it can be evicted by anything. 529 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade; 530 if (!Cascade) 531 Cascade = NextCascade; 532 533 EvictionCost Cost; 534 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 535 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); 536 // If there is 10 or more interferences, chances are one is heavier. 537 if (Q.collectInterferingVRegs(10) >= 10) 538 return false; 539 540 // Check if any interfering live range is heavier than MaxWeight. 541 for (unsigned i = Q.interferingVRegs().size(); i; --i) { 542 LiveInterval *Intf = Q.interferingVRegs()[i - 1]; 543 assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) && 544 "Only expecting virtual register interference from query"); 545 // Never evict spill products. They cannot split or spill. 546 if (getStage(*Intf) == RS_Done) 547 return false; 548 // Once a live range becomes small enough, it is urgent that we find a 549 // register for it. This is indicated by an infinite spill weight. These 550 // urgent live ranges get to evict almost anything. 551 // 552 // Also allow urgent evictions of unspillable ranges from a strictly 553 // larger allocation order. 554 bool Urgent = !VirtReg.isSpillable() && 555 (Intf->isSpillable() || 556 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) < 557 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg))); 558 // Only evict older cascades or live ranges without a cascade. 
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf);
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
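  // BestCost starts out effectively infinite (BrokenHints == ~0u), so any
  // feasible eviction beats it. Note that EvictionCost compares BrokenHints
  // before MaxWeight, so e.g. {0 hints, weight 5.0} is considered cheaper
  // than {1 hint, weight 0.1}.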
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
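  // A positive bias means at least one bundle prefers a register. If
  // scanActiveBundles() finds none, no part of the live range can usefully
  // stay in PhysReg, and the caller discards this candidate.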
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}
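// growRegion() terminates because each round either moves at least one block
// from Todo into ActiveBlocks or finds no new blocks and breaks; a block is
// never revisited once its Todo bit has been cleared.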

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
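  // Todo starts as the full set of through blocks; clearing each block's bit
  // on first visit guarantees that a block shared by several candidates'
  // ActiveBlocks lists is only split once.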
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
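    // Scaling by Hysteresis (0.98) means a region split must be about 2%
    // cheaper than isolating every use block before it is preferred.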
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
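  // The compact region is always GlobalCand[0] with PhysReg == 0: its bundles
  // keep the value in a (virtual) register across blocks without committing
  // to a physical register yet.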
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
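      // IntI and Gap advance together like a sorted merge: gaps that end
      // before this segment were skipped above, and every gap the segment
      // overlaps now has its weight raised to at least the segment's weight.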
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveInterval &LI = LIS->getRegUnit(*Units);
    LiveInterval::const_iterator I = LI.find(StartIdx);
    LiveInterval::const_iterator E = LI.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = HUGE_VALF;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
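        // The estimate below assumes one access per remaining gap boundary:
        // roughly blockFreq * (NewGaps + 1) reads and writes, spread over the
        // new interval's length in slots (plus one instruction distance for
        // each end that stays live). normalizeSpillWeight then converts this
        // into a use density comparable with the interfering ranges' weights.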

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between Uses[i] and Uses[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps; we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }
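
  // Note on the search just completed: a (SplitBefore, SplitAfter) window is
  // recorded only when its estimated spill weight beats MaxGap, the heaviest
  // interference inside the window, and the winner is the one maximizing that
  // margin (BestDiff). Hysteresis, a constant slightly below 1.0 defined
  // earlier in this file, is applied both to the acceptance test and to the
  // recorded margin so that near-ties do not flip-flop between candidates
  // when similar ranges are split repeatedly.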

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return A PhysReg when VirtReg may be assigned, and/or new live ranges in
/// NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Ranges must be RS_Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable, which means
  // that tryRegionSplit won't be making progress. This check should be
  // replaced with an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
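
// selectOrSplit() below follows the RegAllocBase convention for its return
// value: a nonzero PhysReg is assigned to VirtReg by the caller; 0 means no
// assignment was made, but any live ranges appended to NewVRegs (including a
// requeued VirtReg itself) should go back on the priority queue; ~0u means
// allocation is impossible, and the base class reports an error, typically
// caused by unsatisfiable inline-asm constraints.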

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32); // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}
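
// Usage sketch (assumption: the standard llc flags named below, which are
// defined outside this file): this allocator can be selected and observed
// with
//
//   llc -regalloc=greedy -debug-only=regalloc -verify-regalloc foo.ll
//
// where -debug-only=regalloc enables the DEBUG output above in +Asserts
// builds, and -verify-regalloc is the usual way VerifyEnabled gets set,
// making the MF->verify() calls run before allocation and after each spill.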