chaitin.cpp revision 4514:8373c19be854
1/*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "compiler/compileLog.hpp"
27#include "compiler/oopMap.hpp"
28#include "memory/allocation.inline.hpp"
29#include "opto/addnode.hpp"
30#include "opto/block.hpp"
31#include "opto/callnode.hpp"
32#include "opto/cfgnode.hpp"
33#include "opto/chaitin.hpp"
34#include "opto/coalesce.hpp"
35#include "opto/connode.hpp"
36#include "opto/idealGraphPrinter.hpp"
37#include "opto/indexSet.hpp"
38#include "opto/machnode.hpp"
39#include "opto/memnode.hpp"
40#include "opto/opcodes.hpp"
41#include "opto/rootnode.hpp"
42
43//=============================================================================
44
45#ifndef PRODUCT
46void LRG::dump( ) const {
47  ttyLocker ttyl;
48  tty->print("%d ",num_regs());
49  _mask.dump();
50  if( _msize_valid ) {
51    if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
52    else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
53  } else {
54    tty->print(", #?(%d) ",_mask.Size());
55  }
56
57  tty->print("EffDeg: ");
58  if( _degree_valid ) tty->print( "%d ", _eff_degree );
59  else tty->print("? ");
60
61  if( is_multidef() ) {
62    tty->print("MultiDef ");
63    if (_defs != NULL) {
64      tty->print("(");
65      for (int i = 0; i < _defs->length(); i++) {
66        tty->print("N%d ", _defs->at(i)->_idx);
67      }
68      tty->print(") ");
69    }
70  }
71  else if( _def == 0 ) tty->print("Dead ");
72  else tty->print("Def: N%d ",_def->_idx);
73
74  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
75  // Flags
76  if( _is_oop ) tty->print("Oop ");
77  if( _is_float ) tty->print("Float ");
78  if( _is_vector ) tty->print("Vector ");
79  if( _was_spilled1 ) tty->print("Spilled ");
80  if( _was_spilled2 ) tty->print("Spilled2 ");
81  if( _direct_conflict ) tty->print("Direct_conflict ");
82  if( _fat_proj ) tty->print("Fat ");
83  if( _was_lo ) tty->print("Lo ");
84  if( _has_copy ) tty->print("Copy ");
85  if( _at_risk ) tty->print("Risk ");
86
87  if( _must_spill ) tty->print("Must_spill ");
88  if( _is_bound ) tty->print("Bound ");
89  if( _msize_valid ) {
90    if( _degree_valid && lo_degree() ) tty->print("Trivial ");
91  }
92
93  tty->cr();
94}
95#endif
96
97//------------------------------score------------------------------------------
98// Compute score from cost and area.  Low score is best to spill.
99static double raw_score( double cost, double area ) {
100  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
101}
102
103double LRG::score() const {
104  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
105  // Bigger area lowers score, encourages spilling this live range.
106  // Bigger cost raises the score, which prevents spilling this live range.
107  // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
108  // to turn a divide by a constant into a multiply by the reciprocal).
109  double score = raw_score( _cost, _area);
110
111  // Account for area.  Basically, LRGs covering large areas are better
112  // to spill because more other LRGs get freed up.
113  if( _area == 0.0 )            // No area?  Then no progress to spill
114    return 1e35;
115
116  if( _was_spilled2 )           // If spilled once before, we are unlikely
117    return score + 1e30;        // to make progress again.
118
119  if( _cost >= _area*3.0 )      // Tiny area relative to cost
120    return score + 1e17;        // Probably no progress to spill
121
122  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
123    return score + 1e10;        // Likely no progress to spill
124
125  return score;
126}
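// A small worked illustration of the tiers above (illustrative numbers only;
// RegisterCostAreaRatio is a tunable flag, assumed here to be 16):
//
//   cost = 100, area = 0      -> 1e35          (no area, no progress from spilling)
//   cost = 100, area = 20     -> score + 1e17  since cost >= 3*area
//   cost = 100, area = 60     -> score + 1e10  since 2*cost >= 3*area
//   cost = 100, area = 1e6    -> raw_score = 100 - (1e6*16)*1.52588e-5 ~ -144
//
// (assuming the range was not already spilled twice).  Lower scores spill
// first, so large-area, low-cost live ranges are the preferred candidates.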
127
128//------------------------------LRG_List---------------------------------------
129LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
130  memset( _lidxs, 0, sizeof(uint)*max );
131}
132
133void LRG_List::extend( uint nidx, uint lidx ) {
134  _nesting.check();
135  if( nidx >= _max ) {
136    uint size = 16;
137    while( size <= nidx ) size <<=1;
138    _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
139    _max = size;
140  }
141  while( _cnt <= nidx )
142    _lidxs[_cnt++] = 0;
143  _lidxs[nidx] = lidx;
144}
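// Example of the growth policy above (hypothetical call, for illustration):
// with _max == 20, a call to extend(40, 7) doubles the candidate size from 16
// until it exceeds 40 (16 -> 32 -> 64), reallocates _lidxs to 64 entries,
// zero-fills indices _cnt..40, and finally stores 7 at index 40.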
145
146#define NUMBUCKS 3
147
148// Straight out of Tarjan's union-find algorithm
149uint LiveRangeMap::find_compress(uint lrg) {
150  uint cur = lrg;
151  uint next = _uf_map[cur];
152  while (next != cur) { // Scan chain of equivalences
153    assert( next < cur, "always union smaller");
154    cur = next; // until find a fixed-point
155    next = _uf_map[cur];
156  }
157
158  // Core of union-find algorithm: update chain of
159  // equivalences to be equal to the root.
160  while (lrg != next) {
161    uint tmp = _uf_map[lrg];
162    _uf_map.map(lrg, next);
163    lrg = tmp;
164  }
165  return lrg;
166}
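// The same find-with-path-compression idea as a minimal self-contained
// sketch over a plain array (illustration only; the allocator itself goes
// through _uf_map as above):
//
//   static unsigned uf_find_sketch(unsigned* parent, unsigned x) {
//     unsigned root = x;
//     while (parent[root] != root) {    // chase the chain to its fixed point
//       root = parent[root];
//     }
//     while (parent[x] != root) {       // second pass: point the chain at root
//       unsigned next = parent[x];
//       parent[x] = root;
//       x = next;
//     }
//     return root;
//   }
//
// After one call every element on the chain maps straight to its root, which
// keeps later lookups close to constant time.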
167
168// Reset the Union-Find map to identity
169void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
170  _max_lrg_id = max_lrg_id;
171  // Force the Union-Find mapping to be at least this large
172  _uf_map.extend(_max_lrg_id, 0);
173  // Initialize it to be the ID mapping.
174  for (uint i = 0; i < _max_lrg_id; ++i) {
175    _uf_map.map(i, i);
176  }
177}
178
179// Make all Nodes map directly to their final live range; no need for
180// the Union-Find mapping after this call.
181void LiveRangeMap::compress_uf_map_for_nodes() {
182  // For all Nodes, compress mapping
183  uint unique = _names.Size();
184  for (uint i = 0; i < unique; ++i) {
185    uint lrg = _names[i];
186    uint compressed_lrg = find(lrg);
187    if (lrg != compressed_lrg) {
188      _names.map(i, compressed_lrg);
189    }
190  }
191}
192
193// Like Find above, but no path compress, so bad asymptotic behavior
194uint LiveRangeMap::find_const(uint lrg) const {
195  if (!lrg) {
196    return lrg; // Ignore the zero LRG
197  }
198
199  // Off the end?  This happens during debugging dumps when you have
200  // brand new live ranges but have not yet told the allocator about them.
201  if (lrg >= _max_lrg_id) {
202    return lrg;
203  }
204
205  uint next = _uf_map[lrg];
206  while (next != lrg) { // Scan chain of equivalences
207    assert(next < lrg, "always union smaller");
208    lrg = next; // until find a fixed-point
209    next = _uf_map[lrg];
210  }
211  return next;
212}
213
214//------------------------------Chaitin----------------------------------------
215PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
216  : PhaseRegAlloc(unique, cfg, matcher,
217#ifndef PRODUCT
218       print_chaitin_statistics
219#else
220       NULL
221#endif
222       )
223  , _lrg_map(unique)
224  , _live(0)
225  , _spilled_once(Thread::current()->resource_area())
226  , _spilled_twice(Thread::current()->resource_area())
227  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
228  , _oldphi(unique)
229#ifndef PRODUCT
230  , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
231#endif
232{
233  NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
234
235  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
236
237  // Build a list of basic blocks, sorted by frequency
238  _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
239  // Experiment with sorting strategies to speed compilation
240  double  cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
241  Block **buckets[NUMBUCKS];             // Array of buckets
242  uint    buckcnt[NUMBUCKS];             // Array of bucket counters
243  double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
244  for (uint i = 0; i < NUMBUCKS; i++) {
245    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
246    buckcnt[i] = 0;
247    // Bump by three orders of magnitude each time
248    cutoff *= 0.001;
249    buckval[i] = cutoff;
250    for (uint j = 0; j < _cfg._num_blocks; j++) {
251      buckets[i][j] = NULL;
252    }
253  }
254  // Sort blocks into buckets
255  for (uint i = 0; i < _cfg._num_blocks; i++) {
256    for (uint j = 0; j < NUMBUCKS; j++) {
257      if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
258        // Assign block to end of list for appropriate bucket
259        buckets[j][buckcnt[j]++] = _cfg._blocks[i];
260        break; // kick out of inner loop
261      }
262    }
263  }
264  // Dump buckets into final block array
265  uint blkcnt = 0;
266  for (uint i = 0; i < NUMBUCKS; i++) {
267    for (uint j = 0; j < buckcnt[i]; j++) {
268      _blks[blkcnt++] = buckets[i][j];
269    }
270  }
271
272  assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
273}
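// Bucket boundaries, for illustration: with NUMBUCKS == 3 and a starting
// cutoff of BLOCK_FREQUENCY(1.0), the thresholds computed above are
// cutoff*1e-3 and cutoff*1e-6.  A block whose frequency exceeds the first
// lands in bucket 0, one exceeding the second lands in bucket 1, and the
// last bucket catches everything else, so concatenating the buckets gives an
// approximate hottest-first ordering of _blks.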
274
275//------------------------------Union------------------------------------------
276// union 2 sets together.
277void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
278  uint src = _lrg_map.find(src_n);
279  uint dst = _lrg_map.find(dst_n);
280  assert(src, "");
281  assert(dst, "");
282  assert(src < _lrg_map.max_lrg_id(), "oob");
283  assert(dst < _lrg_map.max_lrg_id(), "oob");
284  assert(src < dst, "always union smaller");
285  _lrg_map.uf_map(dst, src);
286}
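// For example (hypothetical live range ids): if find(src_n) == 5 and
// find(dst_n) == 9, the call maps 9 -> 5 in the union-find table, so later
// find() calls on either node resolve to live range 5.  The "always union
// smaller" assert is what keeps every chain running downhill toward its root.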
287
288//------------------------------new_lrg----------------------------------------
289void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
290  // Make the Node->LRG mapping
291  _lrg_map.extend(x->_idx,lrg);
292  // Make the Union-Find mapping an identity function
293  _lrg_map.uf_extend(lrg, lrg);
294}
295
296
297bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
298  Block *bcon = _cfg._bbs[con->_idx];
299  uint cindex = bcon->find_node(con);
300  Node *con_next = bcon->_nodes[cindex+1];
301  if (con_next->in(0) != con || !con_next->is_MachProj()) {
302    return false;               // No MachProj's follow
303  }
304
305  // Copy kills after the cloned constant
306  Node *kills = con_next->clone();
307  kills->set_req(0, copy);
308  b->_nodes.insert(idx, kills);
309  _cfg._bbs.map(kills->_idx, b);
310  new_lrg(kills, max_lrg_id);
311  return true;
312}
313
314//------------------------------compact----------------------------------------
315// Renumber the live ranges to compact them.  Makes the IFG smaller.
316void PhaseChaitin::compact() {
317  // Currently the _uf_map contains a series of short chains which are headed
318  // by a self-cycle.  All the chains run from big numbers to little numbers.
319  // The Find() call chases the chains & shortens them for the next Find call.
320  // We are going to change this structure slightly.  Numbers above a moving
321  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
322  // compacted live range with no further chaining.  There are no chains or
323  // cycles below 'i', so the Find call no longer works.
324  uint j=1;
325  uint i;
326  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
327    uint lr = _lrg_map.uf_live_range_id(i);
328    // Ignore unallocated live ranges
329    if (!lr) {
330      continue;
331    }
332    assert(lr <= i, "");
333    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
334  }
335  // Now change the Node->LR mapping to reflect the compacted names
336  uint unique = _lrg_map.size();
337  for (i = 0; i < unique; i++) {
338    uint lrg_id = _lrg_map.live_range_id(i);
339    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
340  }
341
342  // Reset the Union-Find mapping
343  _lrg_map.reset_uf_map(j);
344}
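// Worked example with hypothetical ids: suppose before compaction
//   _uf_map = { 1->1, 2->2, 3->2, 4->4, 5->3 }
// The wave visits ids in increasing order, so the roots 1, 2 and 4 are
// renumbered 1, 2 and 3, and each non-root picks up its parent's already
// renumbered value:
//   _uf_map = { 1->1, 2->2, 3->2, 4->3, 5->2 }
// Node names are then remapped through this table, and reset_uf_map(4)
// leaves the union-find as the identity over the new, denser id space.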
345
346void PhaseChaitin::Register_Allocate() {
347
348  // Above the OLD FP (and in registers) are the incoming arguments.  Stack
349  // slots in this area are called "arg_slots".  Above the NEW FP (and in
350  // registers) is the outgoing argument area; above that is the spill/temp
351  // area.  These are all "frame_slots".  Arg_slots start at the zero
352  // stack_slots and count up to the known arg_size.  Frame_slots start at
353  // the stack_slot #arg_size and go up.  After allocation I map stack
354  // slots to actual offsets.  Stack-slots in the arg_slot area are biased
355  // by the frame_size; stack-slots in the frame_slot area are biased by 0.
356
357  _trip_cnt = 0;
358  _alternate = 0;
359  _matcher._allocation_started = true;
360
361  ResourceArea split_arena;     // Arena for Split local resources
362  ResourceArea live_arena;      // Arena for liveness & IFG info
363  ResourceMark rm(&live_arena);
364
365  // Need live-ness for the IFG; need the IFG for coalescing.  If the
366  // liveness is JUST for coalescing, then I can get some mileage by renaming
367  // all copy-related live ranges low and then using the max copy-related
368  // live range as a cut-off for LIVE and the IFG.  In other words, I can
369  // build a subset of LIVE and IFG just for copies.
370  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
371
372  // Need IFG for coalescing and coloring
373  PhaseIFG ifg(&live_arena);
374  _ifg = &ifg;
375
376  // Come out of SSA world to the Named world.  Assign (virtual) registers to
377  // Nodes.  Use the same register for all inputs and the output of PhiNodes
378  // - effectively ending SSA form.  This requires either coalescing live
379  // ranges or inserting copies.  For the moment, we insert "virtual copies"
380  // - we pretend there is a copy prior to each Phi in predecessor blocks.
381  // We will attempt to coalesce such "virtual copies" before we manifest
382  // them for real.
383  de_ssa();
384
385#ifdef ASSERT
386  // Verify the graph before RA.
387  verify(&live_arena);
388#endif
389
390  {
391    NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
392    _live = NULL;                 // Mark live as being not available
393    rm.reset_to_mark();           // Reclaim working storage
394    IndexSet::reset_memory(C, &live_arena);
395    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
396    gather_lrg_masks( false );    // Collect LRG masks
397    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
398    _live = &live;                // Mark LIVE as being available
399  }
400
401  // Base pointers are currently "used" by instructions which define new
402  // derived pointers.  This makes base pointers live up to where the
403  // derived pointer is made, but not beyond.  Really, they need to be live
404  // across any GC point where the derived value is live.  So this code looks
405  // at all the GC points, and "stretches" the live range of any base pointer
406  // to the GC point.
407  if (stretch_base_pointer_live_ranges(&live_arena)) {
408    NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
409    // Since some live range stretched, I need to recompute live
410    _live = NULL;
411    rm.reset_to_mark();         // Reclaim working storage
412    IndexSet::reset_memory(C, &live_arena);
413    ifg.init(_lrg_map.max_lrg_id());
414    gather_lrg_masks(false);
415    live.compute(_lrg_map.max_lrg_id());
416    _live = &live;
417  }
418  // Create the interference graph using virtual copies
419  build_ifg_virtual();  // Include stack slots this time
420
421  // Aggressive (but pessimistic) copy coalescing.
422  // This pass works on virtual copies.  Any virtual copies which are not
423  // coalesced get manifested as actual copies
424  {
425    // The IFG is/was triangular.  I am 'squaring it up' so Union can run
426    // faster.  Union requires a 'for all' operation which is slow on the
427    // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
428    // meaning I can visit all of a Node's neighbors less than that Node in time
429    // O(# of neighbors), but I have to visit all the Nodes greater than a
430    // given Node and search them for an instance, i.e., time O(#MaxLRG)).
431    _ifg->SquareUp();
432
433    PhaseAggressiveCoalesce coalesce(*this);
434    coalesce.coalesce_driver();
435    // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
436    // not match the Phi itself, insert a copy.
437    coalesce.insert_copies(_matcher);
438  }
439
440  // After aggressive coalesce, attempt a first cut at coloring.
441  // To color, we need the IFG and for that we need LIVE.
442  {
443    NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
444    _live = NULL;
445    rm.reset_to_mark();           // Reclaim working storage
446    IndexSet::reset_memory(C, &live_arena);
447    ifg.init(_lrg_map.max_lrg_id());
448    gather_lrg_masks( true );
449    live.compute(_lrg_map.max_lrg_id());
450    _live = &live;
451  }
452
453  // Build physical interference graph
454  uint must_spill = 0;
455  must_spill = build_ifg_physical(&live_arena);
456  // If we have a guaranteed spill, might as well spill now
457  if (must_spill) {
458    if(!_lrg_map.max_lrg_id()) {
459      return;
460    }
461    // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
462    C->check_node_count(10*must_spill, "out of nodes before split");
463    if (C->failing()) {
464      return;
465    }
466
467    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
468    _lrg_map.set_max_lrg_id(new_max_lrg_id);
469    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
470    // or we failed to split
471    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
472    if (C->failing()) {
473      return;
474    }
475
476    NOT_PRODUCT(C->verify_graph_edges();)
477
478    compact();                  // Compact LRGs; return new lower max lrg
479
480    {
481      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
482      _live = NULL;
483      rm.reset_to_mark();         // Reclaim working storage
484      IndexSet::reset_memory(C, &live_arena);
485      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
486      gather_lrg_masks( true );   // Collect intersect mask
487      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
488      _live = &live;
489    }
490    build_ifg_physical(&live_arena);
491    _ifg->SquareUp();
492    _ifg->Compute_Effective_Degree();
493    // Only do conservative coalescing if requested
494    if (OptoCoalesce) {
495      // Conservative (and pessimistic) copy coalescing of those spills
496      PhaseConservativeCoalesce coalesce(*this);
497      // If max live ranges greater than cutoff, don't color the stack.
498      // This cutoff can be larger than below since it is only done once.
499      coalesce.coalesce_driver();
500    }
501    _lrg_map.compress_uf_map_for_nodes();
502
503#ifdef ASSERT
504    verify(&live_arena, true);
505#endif
506  } else {
507    ifg.SquareUp();
508    ifg.Compute_Effective_Degree();
509#ifdef ASSERT
510    set_was_low();
511#endif
512  }
513
514  // Prepare for Simplify & Select
515  cache_lrg_info();           // Count degree of LRGs
516
517  // Simplify the InterFerence Graph by removing LRGs of low degree.
518  // LRGs of low degree are trivially colorable.
519  Simplify();
520
521  // Select colors by re-inserting LRGs back into the IFG in reverse order.
522  // Return whether or not something spills.
523  uint spills = Select( );
524
525  // If we spill, split and recycle the entire thing
526  while( spills ) {
527    if( _trip_cnt++ > 24 ) {
528      DEBUG_ONLY( dump_for_spill_split_recycle(); )
529      if( _trip_cnt > 27 ) {
530        C->record_method_not_compilable("failed spill-split-recycle sanity check");
531        return;
532      }
533    }
534
535    if (!_lrg_map.max_lrg_id()) {
536      return;
537    }
538    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
539    _lrg_map.set_max_lrg_id(new_max_lrg_id);
540    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
541    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
542    if (C->failing()) {
543      return;
544    }
545
546    compact(); // Compact LRGs; return new lower max lrg
547
548    // Nuke the live-ness and interference graph and LiveRanGe info
549    {
550      NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
551      _live = NULL;
552      rm.reset_to_mark();         // Reclaim working storage
553      IndexSet::reset_memory(C, &live_arena);
554      ifg.init(_lrg_map.max_lrg_id());
555
556      // Create LiveRanGe array.
557      // Intersect register masks for all USEs and DEFs
558      gather_lrg_masks(true);
559      live.compute(_lrg_map.max_lrg_id());
560      _live = &live;
561    }
562    must_spill = build_ifg_physical(&live_arena);
563    _ifg->SquareUp();
564    _ifg->Compute_Effective_Degree();
565
566    // Only do conservative coalescing if requested
567    if (OptoCoalesce) {
568      // Conservative (and pessimistic) copy coalescing
569      PhaseConservativeCoalesce coalesce(*this);
570      // A check for few live ranges determines how aggressive the coalescing is.
571      coalesce.coalesce_driver();
572    }
573    _lrg_map.compress_uf_map_for_nodes();
574#ifdef ASSERT
575    verify(&live_arena, true);
576#endif
577    cache_lrg_info();           // Count degree of LRGs
578
579    // Simplify the InterFerence Graph by removing LRGs of low degree.
580    // LRGs of low degree are trivially colorable.
581    Simplify();
582
583    // Select colors by re-inserting LRGs back into the IFG in reverse order.
584    // Return whether or not something spills.
585    spills = Select();
586  }
587
588  // Count number of Simplify-Select trips per coloring success.
589  _allocator_attempts += _trip_cnt + 1;
590  _allocator_successes += 1;
591
592  // Peephole remove copies
593  post_allocate_copy_removal();
594
595#ifdef ASSERT
596  // Verify the graph after RA.
597  verify(&live_arena);
598#endif
599
600  // max_reg is past the largest *register* used.
601  // Convert that to a frame_slot number.
602  if (_max_reg <= _matcher._new_SP) {
603    _framesize = C->out_preserve_stack_slots();
604  }
605  else {
606    _framesize = _max_reg -_matcher._new_SP;
607  }
608  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
609
610  // This frame must preserve the required fp alignment
611  _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
612  assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
613#ifndef PRODUCT
614  _total_framesize += _framesize;
615  if ((int)_framesize > _max_framesize) {
616    _max_framesize = _framesize;
617  }
618#endif
619
620  // Convert CISC spills
621  fixup_spills();
622
623  // Log regalloc results
624  CompileLog* log = Compile::current()->log();
625  if (log != NULL) {
626    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
627  }
628
629  if (C->failing()) {
630    return;
631  }
632
633  NOT_PRODUCT(C->verify_graph_edges();)
634
635  // Move important info out of the live_arena to longer lasting storage.
636  alloc_node_regs(_lrg_map.size());
637  for (uint i=0; i < _lrg_map.size(); i++) {
638    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
639      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
640      if (!lrg.alive()) {
641        set_bad(i);
642      } else if (lrg.num_regs() == 1) {
643        set1(i, lrg.reg());
644      } else {                  // Must be a register-set
645        if (!lrg._fat_proj) {   // Must be aligned adjacent register set
646          // Live ranges record the highest register in their mask.
647          // We want the low register for the AD file writer's convenience.
648          OptoReg::Name hi = lrg.reg(); // Get hi register
649          OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
650          // We have to use pair [lo,lo+1] even for wide vectors because
651          // the rest of code generation works only with pairs. It is safe
652          // since for registers encoding only 'lo' is used.
653          // Second reg from pair is used in ScheduleAndBundle on SPARC where
654          // vector max size is 8, which corresponds to a register pair.
655          // It is also used in BuildOopMaps but oop operations are not
656          // vectorized.
657          set2(i, lo);
658        } else {                // Misaligned; extract 2 bits
659          OptoReg::Name hi = lrg.reg(); // Get hi register
660          lrg.Remove(hi);       // Yank from mask
661          int lo = lrg.mask().find_first_elem(); // Find lo
662          set_pair(i, hi, lo);
663        }
664      }
665      if( lrg._is_oop ) _node_oops.set(i);
666    } else {
667      set_bad(i);
668    }
669  }
670
671  // Done!
672  _live = NULL;
673  _ifg = NULL;
674  C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
675}
676
677//------------------------------de_ssa-----------------------------------------
678void PhaseChaitin::de_ssa() {
679  // Set initial Names for all Nodes.  Most Nodes get the virtual register
680  // number.  A few get the ZERO live range number.  These do not
681  // get allocated, but instead rely on correct scheduling to ensure that
682  // only one instance is simultaneously live at a time.
683  uint lr_counter = 1;
684  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
685    Block *b = _cfg._blocks[i];
686    uint cnt = b->_nodes.size();
687
688    // Handle all the normal Nodes in the block
689    for( uint j = 0; j < cnt; j++ ) {
690      Node *n = b->_nodes[j];
691      // Pre-color to the zero live range, or pick virtual register
692      const RegMask &rm = n->out_RegMask();
693      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
694    }
695  }
696  // Reset the Union-Find mapping to be identity
697  _lrg_map.reset_uf_map(lr_counter);
698}
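// As an example (hedged; exact masks depend on the node): in a block holding
// a Phi, an AddI and a StoreI, the Phi and AddI produce register values
// (non-empty out_RegMask) and would receive fresh names such as 1 and 2,
// while the StoreI produces only memory, has an empty out_RegMask, and so
// maps to the ZERO live range described above.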
699
700
701//------------------------------gather_lrg_masks-------------------------------
702// Gather LiveRanGe information, including register masks.  Modification of
703// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
704void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
705
706  // Nail down the frame pointer live range
707  uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
708  lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
709
710  // For all blocks
711  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
712    Block *b = _cfg._blocks[i];
713
714    // For all instructions
715    for( uint j = 1; j < b->_nodes.size(); j++ ) {
716      Node *n = b->_nodes[j];
717      uint input_edge_start = 1; // Skip the control input on most nodes
718      if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base();
719      uint idx = n->is_Copy();
720
721      // Get virtual register number, same as LiveRanGe index
722      uint vreg = _lrg_map.live_range_id(n);
723      LRG &lrg = lrgs(vreg);
724      if( vreg ) {              // No vreg means un-allocable (e.g. memory)
725
726        // Collect has-copy bit
727        if( idx ) {
728          lrg._has_copy = 1;
729          uint clidx = _lrg_map.live_range_id(n->in(idx));
730          LRG &copy_src = lrgs(clidx);
731          copy_src._has_copy = 1;
732        }
733
734        // Check for float-vs-int live range (used in register-pressure
735        // calculations)
736        const Type *n_type = n->bottom_type();
737        if (n_type->is_floatingpoint())
738          lrg._is_float = 1;
739
740        // Check for twice prior spilling.  Once prior spilling might have
741        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
742        // further spilling is unlikely to make progress.
743        if( _spilled_once.test(n->_idx) ) {
744          lrg._was_spilled1 = 1;
745          if( _spilled_twice.test(n->_idx) )
746            lrg._was_spilled2 = 1;
747        }
748
749#ifndef PRODUCT
750        if (trace_spilling() && lrg._def != NULL) {
751          // collect defs for MultiDef printing
752          if (lrg._defs == NULL) {
753            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
754            lrg._defs->append(lrg._def);
755          }
756          lrg._defs->append(n);
757        }
758#endif
759
760        // Check for a single def LRG; these can spill nicely
761        // via rematerialization.  Flag as NULL for no def found
762        // yet, or 'n' for single def or -1 for many defs.
763        lrg._def = lrg._def ? NodeSentinel : n;
764
765        // Limit result register mask to acceptable registers
766        const RegMask &rm = n->out_RegMask();
767        lrg.AND( rm );
768
769        int ireg = n->ideal_reg();
770        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
771                "oops must be in Op_RegP's" );
772
773        // Check for vector live range (only if vector register is used).
774        // On SPARC vector uses RegD which could be misaligned so it is not
775        // processed as a vector in RA.
776        if (RegMask::is_vector(ireg))
777          lrg._is_vector = 1;
778        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD,
779               "vector must be in vector registers");
780
781        // Check for bound register masks
782        const RegMask &lrgmask = lrg.mask();
783        if (lrgmask.is_bound(ireg))
784          lrg._is_bound = 1;
785
786        // Check for maximum frequency value
787        if (lrg._maxfreq < b->_freq)
788          lrg._maxfreq = b->_freq;
789
790        // Check for oop-iness, or long/double
791        // Check for multi-kill projection
792        switch( ireg ) {
793        case MachProjNode::fat_proj:
794          // Fat projections have size equal to number of registers killed
795          lrg.set_num_regs(rm.Size());
796          lrg.set_reg_pressure(lrg.num_regs());
797          lrg._fat_proj = 1;
798          lrg._is_bound = 1;
799          break;
800        case Op_RegP:
801#ifdef _LP64
802          lrg.set_num_regs(2);  // Size is 2 stack words
803#else
804          lrg.set_num_regs(1);  // Size is 1 stack word
805#endif
806          // Register pressure is tracked relative to the maximum values
807          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
808          // and relative to other types which compete for the same regs.
809          //
810          // The following table contains suggested values based on the
811          // architectures as defined in each .ad file.
812          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
813          // compile-speed or performance.
814          // Note1:
815          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
816          // since .ad registers are defined as high and low halves.
817          // These reg_pressure values remain compatible with the code
818          // in is_high_pressure() which relates get_invalid_mask_size(),
819          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
820          // Note2:
821          // SPARC -d32 has 24 registers available for integral values,
822          // but only 10 of these are safe for 64-bit longs.
823          // Using set_reg_pressure(2) for both int and long means
824          // the allocator will believe it can fit 26 longs into
825          // registers.  Using 2 for longs and 1 for ints means the
826          // allocator will attempt to put 52 integers into registers.
827          // The settings below limit this problem to methods with
828          // many long values which are being run on 32-bit SPARC.
829          //
830          // ------------------- reg_pressure --------------------
831          // Each entry is reg_pressure_per_value,number_of_regs
832          //         RegL  RegI  RegFlags   RegF RegD    INTPRESSURE  FLOATPRESSURE
833          // IA32     2     1     1          1    1          6           6
834          // IA64     1     1     1          1    1         50          41
835          // SPARC    2     2     2          2    2         48 (24)     52 (26)
836          // SPARCV9  2     2     2          2    2         48 (24)     52 (26)
837          // AMD64    1     1     1          1    1         14          15
838          // -----------------------------------------------------
839#if defined(SPARC)
840          lrg.set_reg_pressure(2);  // use for v9 as well
841#else
842          lrg.set_reg_pressure(1);  // normally one value per register
843#endif
844          if( n_type->isa_oop_ptr() ) {
845            lrg._is_oop = 1;
846          }
847          break;
848        case Op_RegL:           // Check for long or double
849        case Op_RegD:
850          lrg.set_num_regs(2);
851          // Define platform specific register pressure
852#if defined(SPARC) || defined(ARM)
853          lrg.set_reg_pressure(2);
854#elif defined(IA32)
855          if( ireg == Op_RegL ) {
856            lrg.set_reg_pressure(2);
857          } else {
858            lrg.set_reg_pressure(1);
859          }
860#else
861          lrg.set_reg_pressure(1);  // normally one value per register
862#endif
863          // If this def of a double forces a mis-aligned double,
864          // flag as '_fat_proj' - really flag as allowing misalignment
865          // AND changes how we count interferences.  A mis-aligned
866          // double can interfere with TWO aligned pairs, or effectively
867          // FOUR registers!
868          if (rm.is_misaligned_pair()) {
869            lrg._fat_proj = 1;
870            lrg._is_bound = 1;
871          }
872          break;
873        case Op_RegF:
874        case Op_RegI:
875        case Op_RegN:
876        case Op_RegFlags:
877        case 0:                 // not an ideal register
878          lrg.set_num_regs(1);
879#ifdef SPARC
880          lrg.set_reg_pressure(2);
881#else
882          lrg.set_reg_pressure(1);
883#endif
884          break;
885        case Op_VecS:
886          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
887          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
888          lrg.set_num_regs(RegMask::SlotsPerVecS);
889          lrg.set_reg_pressure(1);
890          break;
891        case Op_VecD:
892          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
893          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
894          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
895          lrg.set_num_regs(RegMask::SlotsPerVecD);
896          lrg.set_reg_pressure(1);
897          break;
898        case Op_VecX:
899          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
900          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
901          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
902          lrg.set_num_regs(RegMask::SlotsPerVecX);
903          lrg.set_reg_pressure(1);
904          break;
905        case Op_VecY:
906          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
907          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
908          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
909          lrg.set_num_regs(RegMask::SlotsPerVecY);
910          lrg.set_reg_pressure(1);
911          break;
912        default:
913          ShouldNotReachHere();
914        }
915      }
916
917      // Now do the same for inputs
918      uint cnt = n->req();
919      // Setup for CISC SPILLING
920      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
921      if( UseCISCSpill && after_aggressive ) {
922        inp = n->cisc_operand();
923        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
924          // Convert operand number to edge index number
925          inp = n->as_Mach()->operand_index(inp);
926      }
927      // Prepare register mask for each input
928      for( uint k = input_edge_start; k < cnt; k++ ) {
929        uint vreg = _lrg_map.live_range_id(n->in(k));
930        if (!vreg) {
931          continue;
932        }
933
934        // If this instruction is CISC Spillable, add the flags
935        // bit to its appropriate input
936        if( UseCISCSpill && after_aggressive && inp == k ) {
937#ifndef PRODUCT
938          if( TraceCISCSpill ) {
939            tty->print("  use_cisc_RegMask: ");
940            n->dump();
941          }
942#endif
943          n->as_Mach()->use_cisc_RegMask();
944        }
945
946        LRG &lrg = lrgs(vreg);
947        // // Testing for floating point code shape
948        // Node *test = n->in(k);
949        // if( test->is_Mach() ) {
950        //   MachNode *m = test->as_Mach();
951        //   int  op = m->ideal_Opcode();
952        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
953        //     int zzz = 1;
954        //   }
955        // }
956
957        // Limit result register mask to acceptable registers.
958        // Do not limit registers from uncommon uses before
959        // AggressiveCoalesce.  This effectively pre-virtual-splits
960        // around uncommon uses of common defs.
961        const RegMask &rm = n->in_RegMask(k);
962        if( !after_aggressive &&
963          _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
964          // Since we are BEFORE aggressive coalesce, leave the register
965          // mask untrimmed by the call.  This encourages more coalescing.
966          // Later, AFTER aggressive, this live range will have to spill
967          // but the spiller handles slow-path calls very nicely.
968        } else {
969          lrg.AND( rm );
970        }
971
972        // Check for bound register masks
973        const RegMask &lrgmask = lrg.mask();
974        int kreg = n->in(k)->ideal_reg();
975        bool is_vect = RegMask::is_vector(kreg);
976        assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
977               is_vect || kreg == Op_RegD,
978               "vector must be in vector registers");
979        if (lrgmask.is_bound(kreg))
980          lrg._is_bound = 1;
981
982        // If this use of a double forces a mis-aligned double,
983        // flag as '_fat_proj' - really flag as allowing misalignment
984        // AND changes how we count interferences.  A mis-aligned
985        // double can interfere with TWO aligned pairs, or effectively
986        // FOUR registers!
987#ifdef ASSERT
988        if (is_vect) {
989          assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
990          assert(!lrg._fat_proj, "sanity");
991          assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
992        }
993#endif
994        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
995          lrg._fat_proj = 1;
996          lrg._is_bound = 1;
997        }
998        // if the LRG is an unaligned pair, we will have to spill
999        // so clear the LRG's register mask if it is not already spilled
1000        if (!is_vect && !n->is_SpillCopy() &&
1001            (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
1002            lrgmask.is_misaligned_pair()) {
1003          lrg.Clear();
1004        }
1005
1006        // Check for maximum frequency value
1007        if( lrg._maxfreq < b->_freq )
1008          lrg._maxfreq = b->_freq;
1009
1010      } // End for all allocated inputs
1011    } // end for all instructions
1012  } // end for all blocks
1013
1014  // Final per-liverange setup
1015  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
1016    LRG &lrg = lrgs(i2);
1017    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
1018    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
1019      lrg.clear_to_sets();
1020    }
1021    lrg.compute_set_mask_size();
1022    if (lrg.not_free()) {      // Handle case where we lose from the start
1023      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
1024      lrg._direct_conflict = 1;
1025    }
1026    lrg.set_degree(0);          // no neighbors in IFG yet
1027  }
1028}
1029
1030//------------------------------set_was_low------------------------------------
1031// Set the was-lo-degree bit.  Conservative coalescing should not change the
1032// colorability of the graph.  If any live range was of low-degree before
1033// coalescing, it should Simplify.  This call sets the was-lo-degree bit.
1034// The bit is checked in Simplify.
1035void PhaseChaitin::set_was_low() {
1036#ifdef ASSERT
1037  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1038    int size = lrgs(i).num_regs();
1039    uint old_was_lo = lrgs(i)._was_lo;
1040    lrgs(i)._was_lo = 0;
1041    if( lrgs(i).lo_degree() ) {
1042      lrgs(i)._was_lo = 1;      // Trivially of low degree
1043    } else {                    // Else check the Briggs assertion
1044      // Briggs' observation is that the lo-degree neighbors of a
1045      // hi-degree live range will not interfere with the color choices
1046      // of said hi-degree live range.  The Simplify reverse-stack-coloring
1047      // order takes care of the details.  Hence you do not have to count
1048      // low-degree neighbors when determining if this guy colors.
1049      int briggs_degree = 0;
1050      IndexSet *s = _ifg->neighbors(i);
1051      IndexSetIterator elements(s);
1052      uint lidx;
1053      while((lidx = elements.next()) != 0) {
1054        if( !lrgs(lidx).lo_degree() )
1055          briggs_degree += MAX2(size,lrgs(lidx).num_regs());
1056      }
1057      if( briggs_degree < lrgs(i).degrees_of_freedom() )
1058        lrgs(i)._was_lo = 1;    // Low degree via the Briggs assertion
1059    }
1060    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
1061  }
1062#endif
1063}
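// Illustration of the Briggs test (numbers made up): a one-register live
// range with degrees_of_freedom() == 6 and ten neighbors, nine of them
// lo-degree and the tenth a single-register hi-degree range, gets
// briggs_degree == 1 because only the hi-degree neighbor is counted.
// Since 1 < 6 the range is still trivially colorable even though its raw
// degree of 10 looks high.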
1064
1065#define REGISTER_CONSTRAINED 16
1066
1067//------------------------------cache_lrg_info---------------------------------
1068// Compute cost/area ratio, in case we spill.  Build the lo-degree list.
1069void PhaseChaitin::cache_lrg_info( ) {
1070
1071  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1072    LRG &lrg = lrgs(i);
1073
1074    // Check for being of low degree: means we can be trivially colored.
1075    // Low degree, dead or must-spill guys just get to simplify right away
1076    if( lrg.lo_degree() ||
1077       !lrg.alive() ||
1078        lrg._must_spill ) {
1079      // Split low degree list into those guys that must get a
1080      // register and those that can go to register or stack.
1081      // The idea is LRGs that can go register or stack color first when
1082      // they have a good chance of getting a register.  The register-only
1083      // lo-degree live ranges always get a register.
1084      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
1085      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
1086        lrg._next = _lo_stk_degree;
1087        _lo_stk_degree = i;
1088      } else {
1089        lrg._next = _lo_degree;
1090        _lo_degree = i;
1091      }
1092    } else {                    // Else high degree
1093      lrgs(_hi_degree)._prev = i;
1094      lrg._next = _hi_degree;
1095      lrg._prev = 0;
1096      _hi_degree = i;
1097    }
1098  }
1099}
1100
1101//------------------------------Pre-Simplify-----------------------------------
1102// Simplify the IFG by removing LRGs of low degree that have NO copies
1103void PhaseChaitin::Pre_Simplify( ) {
1104
1105  // Warm up the lo-degree no-copy list
1106  int lo_no_copy = 0;
1107  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1108    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
1109        !lrgs(i).alive() ||
1110        lrgs(i)._must_spill) {
1111      lrgs(i)._next = lo_no_copy;
1112      lo_no_copy = i;
1113    }
1114  }
1115
1116  while( lo_no_copy ) {
1117    uint lo = lo_no_copy;
1118    lo_no_copy = lrgs(lo)._next;
1119    int size = lrgs(lo).num_regs();
1120
1121    // Put the simplified guy on the simplified list.
1122    lrgs(lo)._next = _simplified;
1123    _simplified = lo;
1124
1125    // Yank this guy from the IFG.
1126    IndexSet *adj = _ifg->remove_node( lo );
1127
1128    // If any neighbors' degrees fall below their number of
1129    // allowed registers, then put that neighbor on the low degree
1130    // list.  Note that 'degree' can only fall and 'numregs' is
1131    // unchanged by this action.  Thus the two are equal at most once,
1132    // so LRGs hit the lo-degree worklists at most once.
1133    IndexSetIterator elements(adj);
1134    uint neighbor;
1135    while ((neighbor = elements.next()) != 0) {
1136      LRG *n = &lrgs(neighbor);
1137      assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1138
1139      // Check for just becoming of-low-degree
1140      if( n->just_lo_degree() && !n->_has_copy ) {
1141        assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1142        // Put on lo-degree list
1143        n->_next = lo_no_copy;
1144        lo_no_copy = neighbor;
1145      }
1146    }
1147  } // End of while lo-degree no_copy worklist not empty
1148
1149  // No more lo-degree no-copy live ranges to simplify
1150}
1151
1152//------------------------------Simplify---------------------------------------
1153// Simplify the IFG by removing LRGs of low degree.
1154void PhaseChaitin::Simplify( ) {
1155
1156  while( 1 ) {                  // Repeat till simplified it all
1157    // May want to explore simplifying lo_degree before _lo_stk_degree.
1158    // This might result in more spills coloring into registers during
1159    // Select().
1160    while( _lo_degree || _lo_stk_degree ) {
1161      // If possible, pull from lo_stk first
1162      uint lo;
1163      if( _lo_degree ) {
1164        lo = _lo_degree;
1165        _lo_degree = lrgs(lo)._next;
1166      } else {
1167        lo = _lo_stk_degree;
1168        _lo_stk_degree = lrgs(lo)._next;
1169      }
1170
1171      // Put the simplified guy on the simplified list.
1172      lrgs(lo)._next = _simplified;
1173      _simplified = lo;
1174      // If this guy is "at risk" then mark his current neighbors
1175      if( lrgs(lo)._at_risk ) {
1176        IndexSetIterator elements(_ifg->neighbors(lo));
1177        uint datum;
1178        while ((datum = elements.next()) != 0) {
1179          lrgs(datum)._risk_bias = lo;
1180        }
1181      }
1182
1183      // Yank this guy from the IFG.
1184      IndexSet *adj = _ifg->remove_node( lo );
1185
1186      // If any neighbors' degrees fall below their number of
1187      // allowed registers, then put that neighbor on the low degree
1188      // list.  Note that 'degree' can only fall and 'numregs' is
1189      // unchanged by this action.  Thus the two are equal at most once,
1190      // so LRGs hit the lo-degree worklist at most once.
1191      IndexSetIterator elements(adj);
1192      uint neighbor;
1193      while ((neighbor = elements.next()) != 0) {
1194        LRG *n = &lrgs(neighbor);
1195#ifdef ASSERT
1196        if( VerifyOpto || VerifyRegisterAllocator ) {
1197          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1198        }
1199#endif
1200
1201        // Check for just becoming of-low-degree just counting registers.
1202        // _must_spill live ranges are already on the low degree list.
1203        if( n->just_lo_degree() && !n->_must_spill ) {
1204          assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1205          // Pull from hi-degree list
1206          uint prev = n->_prev;
1207          uint next = n->_next;
1208          if( prev ) lrgs(prev)._next = next;
1209          else _hi_degree = next;
1210          lrgs(next)._prev = prev;
1211          n->_next = _lo_degree;
1212          _lo_degree = neighbor;
1213        }
1214      }
1215    } // End of while lo-degree/lo_stk_degree worklist not empty
1216
1217    // Check for got everything: is hi-degree list empty?
1218    if( !_hi_degree ) break;
1219
1220    // Time to pick a potential spill guy
1221    uint lo_score = _hi_degree;
1222    double score = lrgs(lo_score).score();
1223    double area = lrgs(lo_score)._area;
1224    double cost = lrgs(lo_score)._cost;
1225    bool bound = lrgs(lo_score)._is_bound;
1226
1227    // Find cheapest guy
1228    debug_only( int lo_no_simplify=0; );
1229    for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
1230      assert( !(*_ifg->_yanked)[i], "" );
1231      // It's just vaguely possible to move hi-degree to lo-degree without
1232      // going through a just-lo-degree stage: If you remove a double from
1233    // a float live range, its degree will drop by 2 and you can skip the
1234      // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
1235      // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
1236      if( lrgs(i).lo_degree() ) {
1237        lo_score = i;
1238        break;
1239      }
1240      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
1241      double iscore = lrgs(i).score();
1242      double iarea = lrgs(i)._area;
1243      double icost = lrgs(i)._cost;
1244      bool ibound = lrgs(i)._is_bound;
1245
1246      // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
1247      // wins.  Ties happen because all live ranges in question have spilled
1248      // a few times before and the spill-score adds a huge number which
1249      // washes out the low order bits.  We are choosing the lesser of 2
1250      // evils; in this case pick largest area to spill.
1251      // Ties also happen when live ranges are defined and used only inside
1252    // one block, in which case their area is 0 and the score is set to max.
1253      // In such case choose bound live range over unbound to free registers
1254      // or with smaller cost to spill.
1255      if( iscore < score ||
1256          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
1257          (iscore == score && iarea == area &&
1258           ( (ibound && !bound) || ibound == bound && (icost < cost) )) ) {
1259        lo_score = i;
1260        score = iscore;
1261        area = iarea;
1262        cost = icost;
1263        bound = ibound;
1264      }
1265    }
1266    LRG *lo_lrg = &lrgs(lo_score);
1267    // The live range we choose for spilling is either hi-degree, or very
1268    // rarely it can be low-degree.  If we choose a hi-degree live range
1269    // there better not be any lo-degree choices.
1270    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );
1271
1272    // Pull from hi-degree list
1273    uint prev = lo_lrg->_prev;
1274    uint next = lo_lrg->_next;
1275    if( prev ) lrgs(prev)._next = next;
1276    else _hi_degree = next;
1277    lrgs(next)._prev = prev;
1278    // Jam him on the lo-degree list, despite his high degree.
1279    // Maybe he'll get a color, and maybe he'll spill.
1280    // Only Select() will know.
1281    lrgs(lo_score)._at_risk = true;
1282    _lo_degree = lo_score;
1283    lo_lrg->_next = 0;
1284
1285  } // End of while not simplified everything
1286
1287}
1288
1289//------------------------------is_legal_reg-----------------------------------
1290// Is 'reg' register legal for 'lrg'?
1291static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
1292  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
1293      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
1294    // RA uses OptoReg which represents the highest element of a register set.
1295    // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
1296    // in which XMMd is used by RA to represent such vectors. A double value
1297    // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
1298    // The register mask uses largest bits set of overlapping register sets.
1299    // On x86 with AVX it uses 8 bits for each XMM registers set.
1300    //
1301    // The 'lrg' already has cleared-to-set register mask (done in Select()
1302    // before calling choose_color()). Passing mask.Member(reg) check above
1303    // indicates that the size (num_regs) of 'reg' set is less or equal to
1304    // 'lrg' set size.
1305    // For set size 1 any register which is member of 'lrg' mask is legal.
1306    if (lrg.num_regs()==1)
1307      return true;
1308    // For larger sets only an aligned register with the same set size is legal.
1309    int mask = lrg.num_regs()-1;
1310    if ((reg&mask) == mask)
1311      return true;
1312  }
1313  return false;
1314}
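// Worked example of the alignment test (illustrative register indices): for a
// live range with num_regs() == 4 the mask is 3, so
//   reg == 7  ->  (7 & 3) == 3  -> legal   (highest slot of the aligned set 4..7)
//   reg == 6  ->  (6 & 3) == 2  -> illegal (names a mis-aligned 4-slot set)
// and for a pair (num_regs() == 2) only odd indices pass, since RA names a
// set by its highest element.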
1315
1316//------------------------------bias_color-------------------------------------
1317// Choose a color using the biasing heuristic
1318OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
1319
1320  // Check for "at_risk" LRG's
1321  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
1322  if( risk_lrg != 0 ) {
1323    // Walk the colored neighbors of the "at_risk" candidate
1324    // Choose a color which is both legal and already taken by a neighbor
1325    // of the "at_risk" candidate in order to improve the chances of the
1326    // "at_risk" candidate of coloring
1327    IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1328    uint datum;
1329    while ((datum = elements.next()) != 0) {
1330      OptoReg::Name reg = lrgs(datum).reg();
1331      // If this LRG's register is legal for us, choose it
1332      if (is_legal_reg(lrg, reg, chunk))
1333        return reg;
1334    }
1335  }
1336
1337  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
1338  if( copy_lrg != 0 ) {
1339    // If he has a color,
1340    if( !(*(_ifg->_yanked))[copy_lrg] ) {
1341      OptoReg::Name reg = lrgs(copy_lrg).reg();
1342      //  And it is legal for you,
1343      if (is_legal_reg(lrg, reg, chunk))
1344        return reg;
1345    } else if( chunk == 0 ) {
1346      // Choose a color which is legal for him
1347      RegMask tempmask = lrg.mask();
1348      tempmask.AND(lrgs(copy_lrg).mask());
1349      tempmask.clear_to_sets(lrg.num_regs());
1350      OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1351      if (OptoReg::is_valid(reg))
1352        return reg;
1353    }
1354  }
1355
1356  // If no bias info exists, just go with the register selection ordering
1357  if (lrg._is_vector || lrg.num_regs() == 2) {
1358    // Find an aligned set
1359    return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1360  }
1361
1362  // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
1363  // copy removal to remove many more copies, by preventing a just-assigned
1364  // register from being repeatedly assigned.
1365  OptoReg::Name reg = lrg.mask().find_first_elem();
1366  if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1367    // This 'Remove; find; Insert' idiom is an expensive way to find the
1368    // SECOND element in the mask.
1369    lrg.Remove(reg);
1370    OptoReg::Name reg2 = lrg.mask().find_first_elem();
1371    lrg.Insert(reg);
1372    if( OptoReg::is_reg(reg2))
1373      reg = reg2;
1374  }
1375  return OptoReg::add( reg, chunk );
1376}
1377
1378//------------------------------choose_color-----------------------------------
1379// Choose a color in the current chunk
1380OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1381  assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1382  assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1383
1384  if( lrg.num_regs() == 1 ||    // Common Case
1385      !lrg._fat_proj )          // Aligned+adjacent pairs ok
1386    // Use a heuristic to "bias" the color choice
1387    return bias_color(lrg, chunk);
1388
1389  assert(!lrg._is_vector, "should not be vector here" );
1390  assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1391
1392  // Fat-proj case or misaligned double argument.
1393  assert(lrg.compute_mask_size() == lrg.num_regs() ||
1394         lrg.num_regs() == 2,"fat projs exactly color" );
1395  assert( !chunk, "always color in 1st chunk" );
1396  // Return the highest element in the set.
1397  return lrg.mask().find_last_elem();
1398}
1399
1400//------------------------------Select-----------------------------------------
1401// Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
1402// in reverse order of removal.  As long as nothing of hi-degree was yanked,
1403// everything going back is guaranteed a color.  Select that color.  If some
1404// hi-degree LRG cannot get a color then we record that we must spill.
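// Roughly why this works: a live range that was removed as lo-degree had
// fewer neighbors than colors in its mask, so when it is re-inserted its
// already-colored neighbors can block at most that many colors and at least
// one legal color remains.  Only live ranges that were yanked while still at
// hi-degree can fail here and fall into the spill case below.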
1405uint PhaseChaitin::Select( ) {
1406  uint spill_reg = LRG::SPILL_REG;
1407  _max_reg = OptoReg::Name(0);  // Past max register used
1408  while( _simplified ) {
1409    // Pull next LRG from the simplified list - in reverse order of removal
1410    uint lidx = _simplified;
1411    LRG *lrg = &lrgs(lidx);
1412    _simplified = lrg->_next;
1413
1414
1415#ifndef PRODUCT
1416    if (trace_spilling()) {
1417      ttyLocker ttyl;
1418      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1419                    lrg->degrees_of_freedom());
1420      lrg->dump();
1421    }
1422#endif
1423
1424    // Re-insert into the IFG
1425    _ifg->re_insert(lidx);
1426    if( !lrg->alive() ) continue;
1427    // capture allstackedness flag before mask is hacked
1428    const int is_allstack = lrg->mask().is_AllStack();
1429
1430    // Yeah, yeah, yeah, I know, I know.  I can refactor this
1431    // to avoid the GOTO, although the refactored code will not
1432    // be much clearer.  We arrive here IFF we have a stack-based
1433    // live range that cannot color in the current chunk, and it
1434    // has to move into the next free stack chunk.
1435    int chunk = 0;              // Current chunk is first chunk
1436    retry_next_chunk:
1437
1438    // Remove neighbor colors
1439    IndexSet *s = _ifg->neighbors(lidx);
1440
1441    debug_only(RegMask orig_mask = lrg->mask();)
1442    IndexSetIterator elements(s);
1443    uint neighbor;
1444    while ((neighbor = elements.next()) != 0) {
1445      // Note that neighbor might be a spill_reg.  In this case, exclusion
1446      // of its color will be a no-op, since the spill_reg chunk is in outer
1447      // space.  Also, if neighbor is in a different chunk, this exclusion
1448      // will be a no-op.  (Later on, if lrg runs out of possible colors in
1449      // its chunk, a new chunk of color may be tried, in which case
1450      // examination of neighbors is started again, at retry_next_chunk.)
1451      LRG &nlrg = lrgs(neighbor);
1452      OptoReg::Name nreg = nlrg.reg();
1453      // Only subtract masks in the same chunk
1454      if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1455#ifndef PRODUCT
1456        uint size = lrg->mask().Size();
1457        RegMask rm = lrg->mask();
1458#endif
1459        lrg->SUBTRACT(nlrg.mask());
1460#ifndef PRODUCT
1461        if (trace_spilling() && lrg->mask().Size() != size) {
1462          ttyLocker ttyl;
1463          tty->print("L%d ", lidx);
1464          rm.dump();
1465          tty->print(" intersected L%d ", neighbor);
1466          nlrg.mask().dump();
1467          tty->print(" removed ");
1468          rm.SUBTRACT(lrg->mask());
1469          rm.dump();
1470          tty->print(" leaving ");
1471          lrg->mask().dump();
1472          tty->cr();
1473        }
1474#endif
1475      }
1476    }
1477    //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1478    // Aligned pairs need aligned masks
1479    assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1480    if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1481      lrg->clear_to_sets();
1482    }
1483
1484    // Check if a color is available and if so pick the color
1485    OptoReg::Name reg = choose_color( *lrg, chunk );
1486#ifdef SPARC
1487    debug_only(lrg->compute_set_mask_size());
1488    assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1489#endif
1490
1491    //---------------
1492    // If we fail to color and the AllStack flag is set, trigger
1493    // a chunk-rollover event
1494    if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1495      // Bump register mask up to next stack chunk
1496      chunk += RegMask::CHUNK_SIZE;
1497      lrg->Set_All();
1498
1499      goto retry_next_chunk;
1500    }
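    // Chunk rollover, illustratively: an AllStack live range first tries to
    // color in chunk 0, everything a single RegMask can name.  If its
    // neighbors have consumed every candidate there, 'chunk' is bumped by
    // RegMask::CHUNK_SIZE, Set_All() refills the mask, and the neighbor scan
    // restarts at retry_next_chunk; neighbors colored in the previous chunk
    // now lie outside the new one and no longer subtract, so the live range
    // eventually lands on a stack slot in a later chunk.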
1501
1502    //---------------
1503    // Did we get a color?
1504    else if( OptoReg::is_valid(reg)) {
1505#ifndef PRODUCT
1506      RegMask avail_rm = lrg->mask();
1507#endif
1508
1509      // Record selected register
1510      lrg->set_reg(reg);
1511
1512      if( reg >= _max_reg )     // Compute max register limit
1513        _max_reg = OptoReg::add(reg,1);
1514      // Fold reg back into normal space
1515      reg = OptoReg::add(reg,-chunk);
1516
1517      // If the live range is not bound, then we actually had some choices
1518      // to make.  In this case, the mask has more bits in it than the colors
1519      // chosen.  Restrict the mask to just what was picked.
1520      int n_regs = lrg->num_regs();
1521      assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1522      if (n_regs == 1 || !lrg->_fat_proj) {
1523        assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
1524        lrg->Clear();           // Clear the mask
1525        lrg->Insert(reg);       // Set regmask to match selected reg
1526        // For vectors and pairs, also insert the low bit of the pair
1527        for (int i = 1; i < n_regs; i++)
1528          lrg->Insert(OptoReg::add(reg,-i));
1529        lrg->set_mask_size(n_regs);
1530      } else {                  // Else fatproj
1531        // mask must be equal to fatproj bits, by definition
1532      }
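      // Worked example (register names illustrative): a two-slot pair granted
      // reg == R5 ends up with mask { R4, R5 } and mask_size == 2; 'reg'
      // names the HIGH half and the loop above back-fills R4 == reg - 1.
      // A fat-proj skips this and keeps its full kill mask.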
1533#ifndef PRODUCT
1534      if (trace_spilling()) {
1535        ttyLocker ttyl;
1536        tty->print("L%d selected ", lidx);
1537        lrg->mask().dump();
1538        tty->print(" from ");
1539        avail_rm.dump();
1540        tty->cr();
1541      }
1542#endif
1543      // Note that reg is the highest-numbered register in the newly-bound mask.
1544    } // end color available case
1545
1546    //---------------
1547    // Live range is live and no colors available
1548    else {
1549      assert( lrg->alive(), "" );
1550      assert( !lrg->_fat_proj || lrg->is_multidef() ||
1551              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1552      assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1553
1554      // Assign the special spillreg register
1555      lrg->set_reg(OptoReg::Name(spill_reg++));
1556      // Do not empty the regmask; leave mask_size lying around
1557      // for use during Spilling
1558#ifndef PRODUCT
1559      if( trace_spilling() ) {
1560        ttyLocker ttyl;
1561        tty->print("L%d spilling with neighbors: ", lidx);
1562        s->dump();
1563        debug_only(tty->print(" original mask: "));
1564        debug_only(orig_mask.dump());
1565        dump_lrg(lidx);
1566      }
1567#endif
1568    } // end spill case
1569
1570  }
1571
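  // Example (values illustrative): if three live ranges failed to color they
  // received the pseudo-registers SPILL_REG, SPILL_REG+1 and SPILL_REG+2, so
  // the difference below is 3, the number of live ranges that must now be
  // split and spilled before another allocation attempt.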
1572  return spill_reg-LRG::SPILL_REG;      // Return number of spills
1573}
1574
1575
1576//------------------------------copy_was_spilled-------------------------------
1577// Copy 'was_spilled'-edness from the source Node to the dst Node.
1578void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1579  if( _spilled_once.test(src->_idx) ) {
1580    _spilled_once.set(dst->_idx);
1581    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
1582    if( _spilled_twice.test(src->_idx) ) {
1583      _spilled_twice.set(dst->_idx);
1584      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
1585    }
1586  }
1587}
1588
1589//------------------------------set_was_spilled--------------------------------
1590// Set the 'spilled_once' or 'spilled_twice' flag on a node.
1591void PhaseChaitin::set_was_spilled( Node *n ) {
1592  if( _spilled_once.test_set(n->_idx) )
1593    _spilled_twice.set(n->_idx);
1594}
1595
1596//------------------------------fixup_spills-----------------------------------
1597// Convert Ideal spill instructions into proper FramePtr + offset Loads and
1598// Stores.  Use-def chains are NOT preserved, but Node->LRG->reg maps are.
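// Illustrative shape of the rewrite (instruction and operand names
// hypothetical): if the second input of an integer add was spilled to
// [FP + #12], the register form
//   add  dst, src1, src2        // src2's value actually lives on the stack
// is replaced by its cisc_version(), a memory-operand form
//   add  dst, src1, [FP + #12]
// so the value is consumed directly from the stack without an explicit reload.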
1599void PhaseChaitin::fixup_spills() {
1600  // This function does only cisc spill work.
1601  if( !UseCISCSpill ) return;
1602
1603  NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
1604
1605  // Grab the Frame Pointer
1606  Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr);
1607
1608  // For all blocks
1609  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
1610    Block *b = _cfg._blocks[i];
1611
1612    // For all instructions in block
1613    uint last_inst = b->end_idx();
1614    for( uint j = 1; j <= last_inst; j++ ) {
1615      Node *n = b->_nodes[j];
1616
1617      // Dead instruction???
1618      assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1619              C->top() == n ||  // Or the random TOP node
1620              n->is_Proj(),     // Or a fat-proj kill node
1621              "No dead instructions after post-alloc" );
1622
1623      int inp = n->cisc_operand();
1624      if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1625        // Convert operand number to edge index number
1626        MachNode *mach = n->as_Mach();
1627        inp = mach->operand_index(inp);
1628        Node *src = n->in(inp);   // Value to load or store
1629        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
1630        OptoReg::Name src_reg = lrg_cisc.reg();
1631        // Doubles record the HIGH register of an adjacent pair.
1632        src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
1633        if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1634          // This is a CISC Spill, get stack offset and construct new node
1635#ifndef PRODUCT
1636          if( TraceCISCSpill ) {
1637            tty->print("    reg-instr:  ");
1638            n->dump();
1639          }
1640#endif
1641          int stk_offset = reg2offset(src_reg);
1642          // Bailout if we might exceed node limit when spilling this instruction
1643          C->check_node_count(0, "out of nodes fixing spills");
1644          if (C->failing())  return;
1645          // Transform node
1646          MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
1647          cisc->set_req(inp,fp);          // Base register is frame pointer
1648          if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1649            assert( cisc->oper_input_base() == 2, "Only adding one edge");
1650            cisc->ins_req(1,src);         // Requires a memory edge
1651          }
1652          b->_nodes.map(j,cisc);          // Insert into basic block
1653          n->subsume_by(cisc, C); // Correct graph
1654          //
1655          ++_used_cisc_instructions;
1656#ifndef PRODUCT
1657          if( TraceCISCSpill ) {
1658            tty->print("    cisc-instr: ");
1659            cisc->dump();
1660          }
1661#endif
1662        } else {
1663#ifndef PRODUCT
1664          if( TraceCISCSpill ) {
1665            tty->print("    using reg-instr: ");
1666            n->dump();
1667          }
1668#endif
1669          ++_unused_cisc_instructions;    // cisc-spillable, but the input stayed in a register
1670        }
1671      }
1672
1673    } // End of for all instructions
1674
1675  } // End of for all blocks
1676}
1677
1678//------------------------------find_base_for_derived--------------------------
1679// Helper for stretch_base_pointer_live_ranges (below); recursively discover the base Node for a
1680// given derived Node.  Easy for AddP-related machine nodes, but needs
1681// to be recursive for derived Phis.
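// Illustrative example: for a machine AddP such as 'derived = AddP(obj, obj, #16)'
// the base is simply in(AddPNode::Base), i.e. 'obj'.  For 'derived = Phi(d1, d2)'
// merging two derived pointers, the bases of d1 and d2 are found recursively and,
// if they differ, a parallel Phi of the bases is built (or an existing matching
// base-Phi already in the block is reused).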
1682Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1683  // See if already computed; if so return it
1684  if( derived_base_map[derived->_idx] )
1685    return derived_base_map[derived->_idx];
1686
1687  // See if this happens to be a base.
1688  // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1689  // pointers derived from NULL!  These are always along paths that
1690  // can't happen at run-time but the optimizer cannot deduce it so
1691  // we have to handle it gracefully.
1692  assert(!derived->bottom_type()->isa_narrowoop() ||
1693          derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1694  const TypePtr *tj = derived->bottom_type()->isa_ptr();
1695  // If its an OOP with a non-zero offset, then it is derived.
1696  if( tj == NULL || tj->_offset == 0 ) {
1697    derived_base_map[derived->_idx] = derived;
1698    return derived;
1699  }
1700  // Derived is NULL+offset?  Base is NULL!
1701  if( derived->is_Con() ) {
1702    Node *base = _matcher.mach_null();
1703    assert(base != NULL, "sanity");
1704    if (base->in(0) == NULL) {
1705      // Initialize it once and make it shared:
1706      // set control to _root and place it into Start block
1707      // (where top() node is placed).
1708      base->init_req(0, _cfg._root);
1709      Block *startb = _cfg._bbs[C->top()->_idx];
1710      startb->_nodes.insert(startb->find_node(C->top()), base );
1711      _cfg._bbs.map( base->_idx, startb );
1712      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1713    }
1714    if (_lrg_map.live_range_id(base) == 0) {
1715      new_lrg(base, maxlrg++);
1716    }
1717    assert(base->in(0) == _cfg._root &&
1718           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
1719    derived_base_map[derived->_idx] = base;
1720    return base;
1721  }
1722
1723  // Check for AddP-related opcodes
1724  if (!derived->is_Phi()) {
1725    assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
1726    Node *base = derived->in(AddPNode::Base);
1727    derived_base_map[derived->_idx] = base;
1728    return base;
1729  }
1730
1731  // Recursively find bases for Phis.
1732  // First check to see if we can avoid a base Phi here.
1733  Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1734  uint i;
1735  for( i = 2; i < derived->req(); i++ )
1736    if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1737      break;
1738  // Went to the end without finding any different bases?
1739  if( i == derived->req() ) {   // No need for a base Phi here
1740    derived_base_map[derived->_idx] = base;
1741    return base;
1742  }
1743
1744  // Now we see we need a base-Phi here to merge the bases
1745  const Type *t = base->bottom_type();
1746  base = new (C) PhiNode( derived->in(0), t );
1747  for( i = 1; i < derived->req(); i++ ) {
1748    base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1749    t = t->meet(base->in(i)->bottom_type());
1750  }
1751  base->as_Phi()->set_type(t);
1752
1753  // Search the current block for an existing base-Phi
1754  Block *b = _cfg._bbs[derived->_idx];
1755  for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1756    Node *phi = b->_nodes[i];
1757    if( !phi->is_Phi() ) {      // Found end of Phis with no match?
1758      b->_nodes.insert( i, base ); // Must insert created Phi here as base
1759      _cfg._bbs.map( base->_idx, b );
1760      new_lrg(base,maxlrg++);
1761      break;
1762    }
1763    // See if Phi matches.
1764    uint j;
1765    for( j = 1; j < base->req(); j++ )
1766      if( phi->in(j) != base->in(j) &&
1767          !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1768        break;
1769    if( j == base->req() ) {    // All inputs match?
1770      base = phi;               // Then use existing 'phi' and drop 'base'
1771      break;
1772    }
1773  }
1774
1775
1776  // Cache info for later passes
1777  derived_base_map[derived->_idx] = base;
1778  return base;
1779}
1780
1781
1782//------------------------------stretch_base_pointer_live_ranges---------------
1783// At each Safepoint, insert extra debug edges for each pair of derived value/
1784// base pointer that is live across the Safepoint for oopmap building.  The
1785// edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
1786// required edge set.
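// Illustrative example: if a derived pointer D == B + #16 is live across a
// safepoint, the pair (D, B) is appended to the safepoint's inputs below.  The
// oopmap builder can then record the derived/base relationship, and when GC
// moves the object B points at, D can be recomputed from the relocated base.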
1787bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
1788  int must_recompute_live = false;
1789  uint maxlrg = _lrg_map.max_lrg_id();
1790  Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1791  memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1792
1793  // For all blocks in RPO do...
1794  for( uint i=0; i<_cfg._num_blocks; i++ ) {
1795    Block *b = _cfg._blocks[i];
1796    // Note use of deep-copy constructor.  I cannot hammer the original
1797    // liveout bits, because they are needed by the following coalesce pass.
1798    IndexSet liveout(_live->live(b));
1799
1800    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
1801      Node *n = b->_nodes[j-1];
1802
1803      // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
1804      // like to see in the same register.  Compare uses the loop-phi and so
1805      // extends its live range BUT cannot be part of the cycle.  If this
1806      // extended live range overlaps with the update of the loop-phi value
1807      // we need both alive at the same time -- which requires at least 1
1808      // copy.  But because Intel has only 2-address registers we end up with
1809      // at least 2 copies, one before the loop-phi update instruction and
1810      // one after.  Instead we split the input to the compare just after the
1811      // phi.
1812      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1813        Node *phi = n->in(1);
1814        if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1815          Block *phi_block = _cfg._bbs[phi->_idx];
1816          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
1817            const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1818            Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
1819            insert_proj( phi_block, 1, spill, maxlrg++ );
1820            n->set_req(1,spill);
1821            must_recompute_live = true;
1822          }
1823        }
1824      }
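      // Illustrative shape of the transform above: for 'i = Phi(init, i+1)'
      // feeding the loop-exit test 'CmpI(i, limit)', a MachSpillCopy of the
      // phi is inserted just after the phi and the compare is repointed at the
      // copy, so the compare no longer stretches the phi's live range across
      // the 'i+1' update on the back edge.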
1825
1826      // Get value being defined
1827      uint lidx = _lrg_map.live_range_id(n);
1828      // Ignore the occasional brand-new live range
1829      if (lidx && lidx < _lrg_map.max_lrg_id()) {
1830        // Remove from live-out set
1831        liveout.remove(lidx);
1832
1833        // Copies do not define a new value and so do not interfere.
1834        // Remove the copy's source from the liveout set before interfering.
1835        uint idx = n->is_Copy();
1836        if (idx) {
1837          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1838        }
1839      }
1840
1841      // Found a safepoint?
1842      JVMState *jvms = n->jvms();
1843      if( jvms ) {
1844        // Now scan for a live derived pointer
1845        IndexSetIterator elements(&liveout);
1846        uint neighbor;
1847        while ((neighbor = elements.next()) != 0) {
1848          // Find reaching DEF for base and derived values
1849          // This works because we are still in SSA during this call.
1850          Node *derived = lrgs(neighbor)._def;
1851          const TypePtr *tj = derived->bottom_type()->isa_ptr();
1852          assert(!derived->bottom_type()->isa_narrowoop() ||
1853                  derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1854          // If it's an OOP with a non-zero offset, then it is derived.
1855          if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1856            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1857            assert(base->_idx < _lrg_map.size(), "");
1858            // Add reaching DEFs of derived pointer and base pointer as a
1859            // pair of inputs
1860            n->add_req(derived);
1861            n->add_req(base);
1862
1863            // See if the base pointer is already live to this point.
1864            // Since I'm working on the SSA form, live-ness amounts to
1865            // reaching def's.  So if I find the base's live range then
1866            // I know the base's def reaches here.
1867            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1868                 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1869                 (_lrg_map.live_range_id(base) > 0) && // not a constant
1870                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
1871              // Base pointer is not currently live.  Since I stretched
1872              // the base pointer to here and it crosses basic-block
1873              // boundaries, the global live info is now incorrect.
1874              // Recompute live.
1875              must_recompute_live = true;
1876            } // End of if base pointer is not live to debug info
1877          }
1878        } // End of scan all live data for derived ptrs crossing GC point
1879      } // End of if found a GC point
1880
1881      // Make all inputs live
1882      if (!n->is_Phi()) {      // Phi function uses come from prior block
1883        for (uint k = 1; k < n->req(); k++) {
1884          uint lidx = _lrg_map.live_range_id(n->in(k));
1885          if (lidx < _lrg_map.max_lrg_id()) {
1886            liveout.insert(lidx);
1887          }
1888        }
1889      }
1890
1891    } // End of forall instructions in block
1892    liveout.clear();  // Free the memory used by liveout.
1893
1894  } // End of forall blocks
1895  _lrg_map.set_max_lrg_id(maxlrg);
1896
1897  // If I created a new live range I need to recompute live
1898  if (maxlrg != _ifg->_maxlrg) {
1899    must_recompute_live = true;
1900  }
1901
1902  return must_recompute_live != 0;
1903}
1904
1905
1906//------------------------------add_reference----------------------------------
1907// Extend the node-to-LRG mapping
1908
1909void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
1910  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
1911}
1912
1913//------------------------------dump-------------------------------------------
1914#ifndef PRODUCT
1915void PhaseChaitin::dump(const Node *n) const {
1916  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
1917  tty->print("L%d",r);
1918  if (r && n->Opcode() != Op_Phi) {
1919    if( _node_regs ) {          // Got a post-allocation copy of allocation?
1920      tty->print("[");
1921      OptoReg::Name second = get_reg_second(n);
1922      if( OptoReg::is_valid(second) ) {
1923        if( OptoReg::is_reg(second) )
1924          tty->print("%s:",Matcher::regName[second]);
1925        else
1926          tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1927      }
1928      OptoReg::Name first = get_reg_first(n);
1929      if( OptoReg::is_reg(first) )
1930        tty->print("%s]",Matcher::regName[first]);
1931      else
1932         tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
1933    } else
1934      n->out_RegMask().dump();
1935  }
1936  tty->print("/N%d\t",n->_idx);
1937  tty->print("%s === ", n->Name());
1938  uint k;
1939  for (k = 0; k < n->req(); k++) {
1940    Node *m = n->in(k);
1941    if (!m) {
1942      tty->print("_ ");
1943    }
1944    else {
1945      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1946      tty->print("L%d",r);
1947      // Data MultiNode's can have projections with no real registers.
1948      // Don't die while dumping them.
1949      int op = n->Opcode();
1950      if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1951        if( _node_regs ) {
1952          tty->print("[");
1953          OptoReg::Name second = get_reg_second(n->in(k));
1954          if( OptoReg::is_valid(second) ) {
1955            if( OptoReg::is_reg(second) )
1956              tty->print("%s:",Matcher::regName[second]);
1957            else
1958              tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
1959                         reg2offset_unchecked(second));
1960          }
1961          OptoReg::Name first = get_reg_first(n->in(k));
1962          if( OptoReg::is_reg(first) )
1963            tty->print("%s]",Matcher::regName[first]);
1964          else
1965            tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
1966                       reg2offset_unchecked(first));
1967        } else
1968          n->in_RegMask(k).dump();
1969      }
1970      tty->print("/N%d ",m->_idx);
1971    }
1972  }
1973  if( k < n->len() && n->in(k) ) tty->print("| ");
1974  for( ; k < n->len(); k++ ) {
1975    Node *m = n->in(k);
1976    if(!m) {
1977      break;
1978    }
1979    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1980    tty->print("L%d",r);
1981    tty->print("/N%d ",m->_idx);
1982  }
1983  if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
1984  else n->dump_spec(tty);
1985  if( _spilled_once.test(n->_idx ) ) {
1986    tty->print(" Spill_1");
1987    if( _spilled_twice.test(n->_idx ) )
1988      tty->print(" Spill_2");
1989  }
1990  tty->print("\n");
1991}
1992
1993void PhaseChaitin::dump( const Block * b ) const {
1994  b->dump_head( &_cfg._bbs );
1995
1996  // For all instructions
1997  for( uint j = 0; j < b->_nodes.size(); j++ )
1998    dump(b->_nodes[j]);
1999  // Print live-out info at end of block
2000  if( _live ) {
2001    tty->print("Liveout: ");
2002    IndexSet *live = _live->live(b);
2003    IndexSetIterator elements(live);
2004    tty->print("{");
2005    uint i;
2006    while ((i = elements.next()) != 0) {
2007      tty->print("L%d ", _lrg_map.find_const(i));
2008    }
2009    tty->print_cr("}");
2010  }
2011  tty->print("\n");
2012}
2013
2014void PhaseChaitin::dump() const {
2015  tty->print( "--- Chaitin -- argsize: %d  framesize: %d ---\n",
2016              _matcher._new_SP, _framesize );
2017
2018  // For all blocks
2019  for( uint i = 0; i < _cfg._num_blocks; i++ )
2020    dump(_cfg._blocks[i]);
2021  // End of per-block dump
2022  tty->print("\n");
2023
2024  if (!_ifg) {
2025    tty->print("(No IFG.)\n");
2026    return;
2027  }
2028
2029  // Dump LRG array
2030  tty->print("--- Live RanGe Array ---\n");
2031  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
2032    tty->print("L%d: ",i2);
2033    if (i2 < _ifg->_maxlrg) {
2034      lrgs(i2).dump();
2035    }
2036    else {
2037      tty->print_cr("new LRG");
2038    }
2039  }
2040  tty->print_cr("");
2041
2042  // Dump lo-degree list
2043  tty->print("Lo degree: ");
2044  for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
2045    tty->print("L%d ",i3);
2046  tty->print_cr("");
2047
2048  // Dump lo-stk-degree list
2049  tty->print("Lo stk degree: ");
2050  for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
2051    tty->print("L%d ",i4);
2052  tty->print_cr("");
2053
2054  // Dump hi-degree list
2055  tty->print("Hi degree: ");
2056  for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
2057    tty->print("L%d ",i5);
2058  tty->print_cr("");
2059}
2060
2061//------------------------------dump_degree_lists------------------------------
2062void PhaseChaitin::dump_degree_lists() const {
2063  // Dump lo-degree list
2064  tty->print("Lo degree: ");
2065  for( uint i = _lo_degree; i; i = lrgs(i)._next )
2066    tty->print("L%d ",i);
2067  tty->print_cr("");
2068
2069  // Dump lo-stk-degree list
2070  tty->print("Lo stk degree: ");
2071  for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
2072    tty->print("L%d ",i2);
2073  tty->print_cr("");
2074
2075  // Dump hi-degree list
2076  tty->print("Hi degree: ");
2077  for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
2078    tty->print("L%d ",i3);
2079  tty->print_cr("");
2080}
2081
2082//------------------------------dump_simplified--------------------------------
2083void PhaseChaitin::dump_simplified() const {
2084  tty->print("Simplified: ");
2085  for( uint i = _simplified; i; i = lrgs(i)._next )
2086    tty->print("L%d ",i);
2087  tty->print_cr("");
2088}
2089
2090static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
2091  if ((int)reg < 0)
2092    sprintf(buf, "<OptoReg::%d>", (int)reg);
2093  else if (OptoReg::is_reg(reg))
2094    strcpy(buf, Matcher::regName[reg]);
2095  else
2096    sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
2097            pc->reg2offset(reg));
2098  return buf+strlen(buf);
2099}
2100
2101//------------------------------dump_register----------------------------------
2102// Dump a register name into a buffer.  Be intelligent if we get called
2103// before allocation is complete.
2104char *PhaseChaitin::dump_register( const Node *n, char *buf  ) const {
2105  if( !this ) {                 // Not got anything?
2106    sprintf(buf,"N%d",n->_idx); // Then use Node index
2107  } else if( _node_regs ) {
2108    // Post allocation, use direct mappings, no LRG info available
2109    print_reg( get_reg_first(n), this, buf );
2110  } else {
2111    uint lidx = _lrg_map.find_const(n); // Grab LRG number
2112    if( !_ifg ) {
2113      sprintf(buf,"L%d",lidx);  // No register binding yet
2114    } else if( !lidx ) {        // Special, not allocated value
2115      strcpy(buf,"Special");
2116    } else {
2117      if (lrgs(lidx)._is_vector) {
2118        if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
2119          print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
2120        else
2121          sprintf(buf,"L%d",lidx); // No register binding yet
2122      } else if( (lrgs(lidx).num_regs() == 1)
2123                 ? lrgs(lidx).mask().is_bound1()
2124                 : lrgs(lidx).mask().is_bound_pair() ) {
2125        // Hah!  We have a bound machine register
2126        print_reg( lrgs(lidx).reg(), this, buf );
2127      } else {
2128        sprintf(buf,"L%d",lidx); // No register binding yet
2129      }
2130    }
2131  }
2132  return buf+strlen(buf);
2133}
2134
2135//----------------------dump_for_spill_split_recycle--------------------------
2136void PhaseChaitin::dump_for_spill_split_recycle() const {
2137  if( WizardMode && (PrintCompilation || PrintOpto) ) {
2138    // Display which live ranges need to be split and the allocator's state
2139    tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2140    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2141      if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2142        tty->print("L%d: ", bidx);
2143        lrgs(bidx).dump();
2144      }
2145    }
2146    tty->cr();
2147    dump();
2148  }
2149}
2150
2151//------------------------------dump_frame------------------------------------
2152void PhaseChaitin::dump_frame() const {
2153  const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2154  const TypeTuple *domain = C->tf()->domain();
2155  const int        argcnt = domain->cnt() - TypeFunc::Parms;
2156
2157  // Incoming arguments in registers dump
2158  for( int k = 0; k < argcnt; k++ ) {
2159    OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2160    if( OptoReg::is_reg(parmreg))  {
2161      const char *reg_name = OptoReg::regname(parmreg);
2162      tty->print("#r%3.3d %s", parmreg, reg_name);
2163      parmreg = _matcher._parm_regs[k].second();
2164      if( OptoReg::is_reg(parmreg))  {
2165        tty->print(":%s", OptoReg::regname(parmreg));
2166      }
2167      tty->print("   : parm %d: ", k);
2168      domain->field_at(k + TypeFunc::Parms)->dump();
2169      tty->print_cr("");
2170    }
2171  }
2172
2173  // Check for un-owned padding above incoming args
2174  OptoReg::Name reg = _matcher._new_SP;
2175  if( reg > _matcher._in_arg_limit ) {
2176    reg = OptoReg::add(reg, -1);
2177    tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2178  }
2179
2180  // Incoming argument area dump
2181  OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2182  while( reg > begin_in_arg ) {
2183    reg = OptoReg::add(reg, -1);
2184    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2185    int j;
2186    for( j = 0; j < argcnt; j++) {
2187      if( _matcher._parm_regs[j].first() == reg ||
2188          _matcher._parm_regs[j].second() == reg ) {
2189        tty->print("parm %d: ",j);
2190        domain->field_at(j + TypeFunc::Parms)->dump();
2191        tty->print_cr("");
2192        break;
2193      }
2194    }
2195    if( j >= argcnt )
2196      tty->print_cr("HOLE, owned by SELF");
2197  }
2198
2199  // Old outgoing preserve area
2200  while( reg > _matcher._old_SP ) {
2201    reg = OptoReg::add(reg, -1);
2202    tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2203  }
2204
2205  // Old SP
2206  tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2207    reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2208
2209  // Preserve area dump
2210  int fixed_slots = C->fixed_slots();
2211  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2212  OptoReg::Name return_addr = _matcher.return_addr();
2213
2214  reg = OptoReg::add(reg, -1);
2215  while (OptoReg::is_stack(reg)) {
2216    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2217    if (return_addr == reg) {
2218      tty->print_cr("return address");
2219    } else if (reg >= begin_in_preserve) {
2220      // Preserved slots are present on x86
2221      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2222        tty->print_cr("saved fp register");
2223      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2224               VerifyStackAtCalls)
2225        tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
2226      else
2227        tty->print_cr("in_preserve");
2228    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2229      tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2230    } else {
2231      tty->print_cr("pad2, stack alignment");
2232    }
2233    reg = OptoReg::add(reg, -1);
2234  }
2235
2236  // Spill area dump
2237  reg = OptoReg::add(_matcher._new_SP, _framesize );
2238  while( reg > _matcher._out_arg_limit ) {
2239    reg = OptoReg::add(reg, -1);
2240    tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2241  }
2242
2243  // Outgoing argument area dump
2244  while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2245    reg = OptoReg::add(reg, -1);
2246    tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2247  }
2248
2249  // Outgoing new preserve area
2250  while( reg > _matcher._new_SP ) {
2251    reg = OptoReg::add(reg, -1);
2252    tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2253  }
2254  tty->print_cr("#");
2255}
2256
2257//------------------------------dump_bb----------------------------------------
2258void PhaseChaitin::dump_bb( uint pre_order ) const {
2259  tty->print_cr("---dump of B%d---",pre_order);
2260  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
2261    Block *b = _cfg._blocks[i];
2262    if( b->_pre_order == pre_order )
2263      dump(b);
2264  }
2265}
2266
2267//------------------------------dump_lrg---------------------------------------
2268void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2269  tty->print_cr("---dump of L%d---",lidx);
2270
2271  if (_ifg) {
2272    if (lidx >= _lrg_map.max_lrg_id()) {
2273      tty->print("Attempt to print live range index beyond max live range.\n");
2274      return;
2275    }
2276    tty->print("L%d: ",lidx);
2277    if (lidx < _ifg->_maxlrg) {
2278      lrgs(lidx).dump();
2279    } else {
2280      tty->print_cr("new LRG");
2281    }
2282  }
2283  if( _ifg && lidx < _ifg->_maxlrg) {
2284    tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2285    _ifg->neighbors(lidx)->dump();
2286    tty->cr();
2287  }
2288  // For all blocks
2289  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
2290    Block *b = _cfg._blocks[i];
2291    int dump_once = 0;
2292
2293    // For all instructions
2294    for( uint j = 0; j < b->_nodes.size(); j++ ) {
2295      Node *n = b->_nodes[j];
2296      if (_lrg_map.find_const(n) == lidx) {
2297        if (!dump_once++) {
2298          tty->cr();
2299          b->dump_head( &_cfg._bbs );
2300        }
2301        dump(n);
2302        continue;
2303      }
2304      if (!defs_only) {
2305        uint cnt = n->req();
2306        for( uint k = 1; k < cnt; k++ ) {
2307          Node *m = n->in(k);
2308          if (!m)  {
2309            continue;  // be robust in the dumper
2310          }
2311          if (_lrg_map.find_const(m) == lidx) {
2312            if (!dump_once++) {
2313              tty->cr();
2314              b->dump_head(&_cfg._bbs);
2315            }
2316            dump(n);
2317          }
2318        }
2319      }
2320    }
2321  } // End of per-block dump
2322  tty->cr();
2323}
2324#endif // not PRODUCT
2325
2326//------------------------------print_chaitin_statistics-------------------------------
2327int PhaseChaitin::_final_loads  = 0;
2328int PhaseChaitin::_final_stores = 0;
2329int PhaseChaitin::_final_memoves= 0;
2330int PhaseChaitin::_final_copies = 0;
2331double PhaseChaitin::_final_load_cost  = 0;
2332double PhaseChaitin::_final_store_cost = 0;
2333double PhaseChaitin::_final_memove_cost= 0;
2334double PhaseChaitin::_final_copy_cost  = 0;
2335int PhaseChaitin::_conserv_coalesce = 0;
2336int PhaseChaitin::_conserv_coalesce_pair = 0;
2337int PhaseChaitin::_conserv_coalesce_trie = 0;
2338int PhaseChaitin::_conserv_coalesce_quad = 0;
2339int PhaseChaitin::_post_alloc = 0;
2340int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2341int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2342int PhaseChaitin::_used_cisc_instructions   = 0;
2343int PhaseChaitin::_unused_cisc_instructions = 0;
2344int PhaseChaitin::_allocator_attempts       = 0;
2345int PhaseChaitin::_allocator_successes      = 0;
2346
2347#ifndef PRODUCT
2348uint PhaseChaitin::_high_pressure           = 0;
2349uint PhaseChaitin::_low_pressure            = 0;
2350
2351void PhaseChaitin::print_chaitin_statistics() {
2352  tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2353  tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
2354  tty->print_cr("Adjusted spill cost = %7.0f.",
2355                _final_load_cost*4.0 + _final_store_cost  * 2.0 +
2356                _final_copy_cost*1.0 + _final_memove_cost*12.0);
2357  tty->print("Conservatively coalesced %d copies, %d pairs",
2358                _conserv_coalesce, _conserv_coalesce_pair);
2359  if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2360    tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2361  tty->print_cr(", %d post alloc.", _post_alloc);
2362  if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2363    tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2364                  _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2365  if( _used_cisc_instructions || _unused_cisc_instructions )
2366    tty->print_cr("Used cisc instruction  %d,  remained in register %d",
2367                   _used_cisc_instructions, _unused_cisc_instructions);
2368  if( _allocator_successes != 0 )
2369    tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2370  tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2371}
2372#endif // not PRODUCT
2373