1/*
2 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "compiler/compileLog.hpp"
27#include "compiler/oopMap.hpp"
28#include "memory/allocation.inline.hpp"
29#include "memory/resourceArea.hpp"
30#include "opto/addnode.hpp"
31#include "opto/block.hpp"
32#include "opto/callnode.hpp"
33#include "opto/cfgnode.hpp"
34#include "opto/chaitin.hpp"
35#include "opto/coalesce.hpp"
36#include "opto/connode.hpp"
37#include "opto/idealGraphPrinter.hpp"
38#include "opto/indexSet.hpp"
39#include "opto/machnode.hpp"
40#include "opto/memnode.hpp"
41#include "opto/movenode.hpp"
42#include "opto/opcodes.hpp"
43#include "opto/rootnode.hpp"
44#include "utilities/align.hpp"
45
46#ifndef PRODUCT
47void LRG::dump() const {
48  ttyLocker ttyl;
49  tty->print("%d ",num_regs());
50  _mask.dump();
51  if( _msize_valid ) {
52    if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
53    else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
54  } else {
55    tty->print(", #?(%d) ",_mask.Size());
56  }
57
58  tty->print("EffDeg: ");
59  if( _degree_valid ) tty->print( "%d ", _eff_degree );
60  else tty->print("? ");
61
62  if( is_multidef() ) {
63    tty->print("MultiDef ");
64    if (_defs != NULL) {
65      tty->print("(");
66      for (int i = 0; i < _defs->length(); i++) {
67        tty->print("N%d ", _defs->at(i)->_idx);
68      }
69      tty->print(") ");
70    }
71  }
72  else if( _def == 0 ) tty->print("Dead ");
73  else tty->print("Def: N%d ",_def->_idx);
74
75  tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
76  // Flags
77  if( _is_oop ) tty->print("Oop ");
78  if( _is_float ) tty->print("Float ");
79  if( _is_vector ) tty->print("Vector ");
80  if( _was_spilled1 ) tty->print("Spilled ");
81  if( _was_spilled2 ) tty->print("Spilled2 ");
82  if( _direct_conflict ) tty->print("Direct_conflict ");
83  if( _fat_proj ) tty->print("Fat ");
84  if( _was_lo ) tty->print("Lo ");
85  if( _has_copy ) tty->print("Copy ");
86  if( _at_risk ) tty->print("Risk ");
87
88  if( _must_spill ) tty->print("Must_spill ");
89  if( _is_bound ) tty->print("Bound ");
90  if( _msize_valid ) {
91    if( _degree_valid && lo_degree() ) tty->print("Trivial ");
92  }
93
94  tty->cr();
95}
96#endif
97
98// Compute score from cost and area.  Low score is best to spill.
99static double raw_score( double cost, double area ) {
100  return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
101}
102
103double LRG::score() const {
104  // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
105  // Bigger area lowers score, encourages spilling this live range.
106  // Bigger cost raise score, prevents spilling this live range.
107  // (Note: 1/65536 is the magic constant below; I dont trust the C optimizer
108  // to turn a divide by a constant into a multiply by the reciprical).
109  double score = raw_score( _cost, _area);
110
111  // Account for area.  Basically, LRGs covering large areas are better
112  // to spill because more other LRGs get freed up.
113  if( _area == 0.0 )            // No area?  Then no progress to spill
114    return 1e35;
115
116  if( _was_spilled2 )           // If spilled once before, we are unlikely
117    return score + 1e30;        // to make progress again.
118
119  if( _cost >= _area*3.0 )      // Tiny area relative to cost
120    return score + 1e17;        // Probably no progress to spill
121
122  if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
123    return score + 1e10;        // Likely no progress to spill
124
125  return score;
126}
127
128#define NUMBUCKS 3
129
130// Straight out of Tarjan's union-find algorithm
131uint LiveRangeMap::find_compress(uint lrg) {
132  uint cur = lrg;
133  uint next = _uf_map.at(cur);
134  while (next != cur) { // Scan chain of equivalences
135    assert( next < cur, "always union smaller");
136    cur = next; // until find a fixed-point
137    next = _uf_map.at(cur);
138  }
139
140  // Core of union-find algorithm: update chain of
141  // equivalences to be equal to the root.
142  while (lrg != next) {
143    uint tmp = _uf_map.at(lrg);
144    _uf_map.at_put(lrg, next);
145    lrg = tmp;
146  }
147  return lrg;
148}
149
150// Reset the Union-Find map to identity
151void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
152  _max_lrg_id= max_lrg_id;
153  // Force the Union-Find mapping to be at least this large
154  _uf_map.at_put_grow(_max_lrg_id, 0);
155  // Initialize it to be the ID mapping.
156  for (uint i = 0; i < _max_lrg_id; ++i) {
157    _uf_map.at_put(i, i);
158  }
159}
160
161// Make all Nodes map directly to their final live range; no need for
162// the Union-Find mapping after this call.
163void LiveRangeMap::compress_uf_map_for_nodes() {
164  // For all Nodes, compress mapping
165  uint unique = _names.length();
166  for (uint i = 0; i < unique; ++i) {
167    uint lrg = _names.at(i);
168    uint compressed_lrg = find(lrg);
169    if (lrg != compressed_lrg) {
170      _names.at_put(i, compressed_lrg);
171    }
172  }
173}
174
175// Like Find above, but no path compress, so bad asymptotic behavior
176uint LiveRangeMap::find_const(uint lrg) const {
177  if (!lrg) {
178    return lrg; // Ignore the zero LRG
179  }
180
181  // Off the end?  This happens during debugging dumps when you got
182  // brand new live ranges but have not told the allocator yet.
183  if (lrg >= _max_lrg_id) {
184    return lrg;
185  }
186
187  uint next = _uf_map.at(lrg);
188  while (next != lrg) { // Scan chain of equivalences
189    assert(next < lrg, "always union smaller");
190    lrg = next; // until find a fixed-point
191    next = _uf_map.at(lrg);
192  }
193  return next;
194}
195
196PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool scheduling_info_generated)
197  : PhaseRegAlloc(unique, cfg, matcher,
198#ifndef PRODUCT
199       print_chaitin_statistics
200#else
201       NULL
202#endif
203       )
204  , _lrg_map(Thread::current()->resource_area(), unique)
205  , _live(0)
206  , _spilled_once(Thread::current()->resource_area())
207  , _spilled_twice(Thread::current()->resource_area())
208  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
209  , _oldphi(unique)
210  , _scheduling_info_generated(scheduling_info_generated)
211  , _sched_int_pressure(0, INTPRESSURE)
212  , _sched_float_pressure(0, FLOATPRESSURE)
213  , _scratch_int_pressure(0, INTPRESSURE)
214  , _scratch_float_pressure(0, FLOATPRESSURE)
215#ifndef PRODUCT
216  , _trace_spilling(C->directive()->TraceSpillingOption)
217#endif
218{
219  Compile::TracePhase tp("ctorChaitin", &timers[_t_ctorChaitin]);
220
221  _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
222
223  // Build a list of basic blocks, sorted by frequency
224  _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
225  // Experiment with sorting strategies to speed compilation
226  double  cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
227  Block **buckets[NUMBUCKS];             // Array of buckets
228  uint    buckcnt[NUMBUCKS];             // Array of bucket counters
229  double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
230  for (uint i = 0; i < NUMBUCKS; i++) {
231    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
232    buckcnt[i] = 0;
233    // Bump by three orders of magnitude each time
234    cutoff *= 0.001;
235    buckval[i] = cutoff;
236    for (uint j = 0; j < _cfg.number_of_blocks(); j++) {
237      buckets[i][j] = NULL;
238    }
239  }
240  // Sort blocks into buckets
241  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
242    for (uint j = 0; j < NUMBUCKS; j++) {
243      if ((j == NUMBUCKS - 1) || (_cfg.get_block(i)->_freq > buckval[j])) {
244        // Assign block to end of list for appropriate bucket
245        buckets[j][buckcnt[j]++] = _cfg.get_block(i);
246        break; // kick out of inner loop
247      }
248    }
249  }
250  // Dump buckets into final block array
251  uint blkcnt = 0;
252  for (uint i = 0; i < NUMBUCKS; i++) {
253    for (uint j = 0; j < buckcnt[i]; j++) {
254      _blks[blkcnt++] = buckets[i][j];
255    }
256  }
257
258  assert(blkcnt == _cfg.number_of_blocks(), "Block array not totally filled");
259}
260
261// union 2 sets together.
262void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
263  uint src = _lrg_map.find(src_n);
264  uint dst = _lrg_map.find(dst_n);
265  assert(src, "");
266  assert(dst, "");
267  assert(src < _lrg_map.max_lrg_id(), "oob");
268  assert(dst < _lrg_map.max_lrg_id(), "oob");
269  assert(src < dst, "always union smaller");
270  _lrg_map.uf_map(dst, src);
271}
272
273void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
274  // Make the Node->LRG mapping
275  _lrg_map.extend(x->_idx,lrg);
276  // Make the Union-Find mapping an identity function
277  _lrg_map.uf_extend(lrg, lrg);
278}
279
280
281int PhaseChaitin::clone_projs(Block* b, uint idx, Node* orig, Node* copy, uint& max_lrg_id) {
282  assert(b->find_node(copy) == (idx - 1), "incorrect insert index for copy kill projections");
283  DEBUG_ONLY( Block* borig = _cfg.get_block_for_node(orig); )
284  int found_projs = 0;
285  uint cnt = orig->outcnt();
286  for (uint i = 0; i < cnt; i++) {
287    Node* proj = orig->raw_out(i);
288    if (proj->is_MachProj()) {
289      assert(proj->outcnt() == 0, "only kill projections are expected here");
290      assert(_cfg.get_block_for_node(proj) == borig, "incorrect block for kill projections");
291      found_projs++;
292      // Copy kill projections after the cloned node
293      Node* kills = proj->clone();
294      kills->set_req(0, copy);
295      b->insert_node(kills, idx++);
296      _cfg.map_node_to_block(kills, b);
297      new_lrg(kills, max_lrg_id++);
298    }
299  }
300  return found_projs;
301}
302
303// Renumber the live ranges to compact them.  Makes the IFG smaller.
304void PhaseChaitin::compact() {
305  Compile::TracePhase tp("chaitinCompact", &timers[_t_chaitinCompact]);
306
307  // Current the _uf_map contains a series of short chains which are headed
308  // by a self-cycle.  All the chains run from big numbers to little numbers.
309  // The Find() call chases the chains & shortens them for the next Find call.
310  // We are going to change this structure slightly.  Numbers above a moving
311  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
312  // compacted live range with no further chaining.  There are no chains or
313  // cycles below 'i', so the Find call no longer works.
314  uint j=1;
315  uint i;
316  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
317    uint lr = _lrg_map.uf_live_range_id(i);
318    // Ignore unallocated live ranges
319    if (!lr) {
320      continue;
321    }
322    assert(lr <= i, "");
323    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
324  }
325  // Now change the Node->LR mapping to reflect the compacted names
326  uint unique = _lrg_map.size();
327  for (i = 0; i < unique; i++) {
328    uint lrg_id = _lrg_map.live_range_id(i);
329    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
330  }
331
332  // Reset the Union-Find mapping
333  _lrg_map.reset_uf_map(j);
334}
335
336void PhaseChaitin::Register_Allocate() {
337
338  // Above the OLD FP (and in registers) are the incoming arguments.  Stack
339  // slots in this area are called "arg_slots".  Above the NEW FP (and in
340  // registers) is the outgoing argument area; above that is the spill/temp
341  // area.  These are all "frame_slots".  Arg_slots start at the zero
342  // stack_slots and count up to the known arg_size.  Frame_slots start at
343  // the stack_slot #arg_size and go up.  After allocation I map stack
344  // slots to actual offsets.  Stack-slots in the arg_slot area are biased
345  // by the frame_size; stack-slots in the frame_slot area are biased by 0.
346
347  _trip_cnt = 0;
348  _alternate = 0;
349  _matcher._allocation_started = true;
350
351  ResourceArea split_arena;     // Arena for Split local resources
352  ResourceArea live_arena;      // Arena for liveness & IFG info
353  ResourceMark rm(&live_arena);
354
355  // Need live-ness for the IFG; need the IFG for coalescing.  If the
356  // liveness is JUST for coalescing, then I can get some mileage by renaming
357  // all copy-related live ranges low and then using the max copy-related
358  // live range as a cut-off for LIVE and the IFG.  In other words, I can
359  // build a subset of LIVE and IFG just for copies.
360  PhaseLive live(_cfg, _lrg_map.names(), &live_arena, false);
361
362  // Need IFG for coalescing and coloring
363  PhaseIFG ifg(&live_arena);
364  _ifg = &ifg;
365
366  // Come out of SSA world to the Named world.  Assign (virtual) registers to
367  // Nodes.  Use the same register for all inputs and the output of PhiNodes
368  // - effectively ending SSA form.  This requires either coalescing live
369  // ranges or inserting copies.  For the moment, we insert "virtual copies"
370  // - we pretend there is a copy prior to each Phi in predecessor blocks.
371  // We will attempt to coalesce such "virtual copies" before we manifest
372  // them for real.
373  de_ssa();
374
375#ifdef ASSERT
376  // Veify the graph before RA.
377  verify(&live_arena);
378#endif
379
380  {
381    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
382    _live = NULL;                 // Mark live as being not available
383    rm.reset_to_mark();           // Reclaim working storage
384    IndexSet::reset_memory(C, &live_arena);
385    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
386    gather_lrg_masks( false );    // Collect LRG masks
387    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
388    _live = &live;                // Mark LIVE as being available
389  }
390
391  // Base pointers are currently "used" by instructions which define new
392  // derived pointers.  This makes base pointers live up to the where the
393  // derived pointer is made, but not beyond.  Really, they need to be live
394  // across any GC point where the derived value is live.  So this code looks
395  // at all the GC points, and "stretches" the live range of any base pointer
396  // to the GC point.
397  if (stretch_base_pointer_live_ranges(&live_arena)) {
398    Compile::TracePhase tp("computeLive (sbplr)", &timers[_t_computeLive]);
399    // Since some live range stretched, I need to recompute live
400    _live = NULL;
401    rm.reset_to_mark();         // Reclaim working storage
402    IndexSet::reset_memory(C, &live_arena);
403    ifg.init(_lrg_map.max_lrg_id());
404    gather_lrg_masks(false);
405    live.compute(_lrg_map.max_lrg_id());
406    _live = &live;
407  }
408  // Create the interference graph using virtual copies
409  build_ifg_virtual();  // Include stack slots this time
410
411  // The IFG is/was triangular.  I am 'squaring it up' so Union can run
412  // faster.  Union requires a 'for all' operation which is slow on the
413  // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
414  // meaning I can visit all the Nodes neighbors less than a Node in time
415  // O(# of neighbors), but I have to visit all the Nodes greater than a
416  // given Node and search them for an instance, i.e., time O(#MaxLRG)).
417  _ifg->SquareUp();
418
419  // Aggressive (but pessimistic) copy coalescing.
420  // This pass works on virtual copies.  Any virtual copies which are not
421  // coalesced get manifested as actual copies
422  {
423    Compile::TracePhase tp("chaitinCoalesce1", &timers[_t_chaitinCoalesce1]);
424
425    PhaseAggressiveCoalesce coalesce(*this);
426    coalesce.coalesce_driver();
427    // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
428    // not match the Phi itself, insert a copy.
429    coalesce.insert_copies(_matcher);
430    if (C->failing()) {
431      return;
432    }
433  }
434
435  // After aggressive coalesce, attempt a first cut at coloring.
436  // To color, we need the IFG and for that we need LIVE.
437  {
438    Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
439    _live = NULL;
440    rm.reset_to_mark();           // Reclaim working storage
441    IndexSet::reset_memory(C, &live_arena);
442    ifg.init(_lrg_map.max_lrg_id());
443    gather_lrg_masks( true );
444    live.compute(_lrg_map.max_lrg_id());
445    _live = &live;
446  }
447
448  // Build physical interference graph
449  uint must_spill = 0;
450  must_spill = build_ifg_physical(&live_arena);
451  // If we have a guaranteed spill, might as well spill now
452  if (must_spill) {
453    if(!_lrg_map.max_lrg_id()) {
454      return;
455    }
456    // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
457    C->check_node_count(10*must_spill, "out of nodes before split");
458    if (C->failing()) {
459      return;
460    }
461
462    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
463    _lrg_map.set_max_lrg_id(new_max_lrg_id);
464    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
465    // or we failed to split
466    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
467    if (C->failing()) {
468      return;
469    }
470
471    NOT_PRODUCT(C->verify_graph_edges();)
472
473    compact();                  // Compact LRGs; return new lower max lrg
474
475    {
476      Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
477      _live = NULL;
478      rm.reset_to_mark();         // Reclaim working storage
479      IndexSet::reset_memory(C, &live_arena);
480      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
481      gather_lrg_masks( true );   // Collect intersect mask
482      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
483      _live = &live;
484    }
485    build_ifg_physical(&live_arena);
486    _ifg->SquareUp();
487    _ifg->Compute_Effective_Degree();
488    // Only do conservative coalescing if requested
489    if (OptoCoalesce) {
490      Compile::TracePhase tp("chaitinCoalesce2", &timers[_t_chaitinCoalesce2]);
491      // Conservative (and pessimistic) copy coalescing of those spills
492      PhaseConservativeCoalesce coalesce(*this);
493      // If max live ranges greater than cutoff, don't color the stack.
494      // This cutoff can be larger than below since it is only done once.
495      coalesce.coalesce_driver();
496    }
497    _lrg_map.compress_uf_map_for_nodes();
498
499#ifdef ASSERT
500    verify(&live_arena, true);
501#endif
502  } else {
503    ifg.SquareUp();
504    ifg.Compute_Effective_Degree();
505#ifdef ASSERT
506    set_was_low();
507#endif
508  }
509
510  // Prepare for Simplify & Select
511  cache_lrg_info();           // Count degree of LRGs
512
513  // Simplify the InterFerence Graph by removing LRGs of low degree.
514  // LRGs of low degree are trivially colorable.
515  Simplify();
516
517  // Select colors by re-inserting LRGs back into the IFG in reverse order.
518  // Return whether or not something spills.
519  uint spills = Select( );
520
521  // If we spill, split and recycle the entire thing
522  while( spills ) {
523    if( _trip_cnt++ > 24 ) {
524      DEBUG_ONLY( dump_for_spill_split_recycle(); )
525      if( _trip_cnt > 27 ) {
526        C->record_method_not_compilable("failed spill-split-recycle sanity check");
527        return;
528      }
529    }
530
531    if (!_lrg_map.max_lrg_id()) {
532      return;
533    }
534    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
535    _lrg_map.set_max_lrg_id(new_max_lrg_id);
536    // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
537    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
538    if (C->failing()) {
539      return;
540    }
541
542    compact(); // Compact LRGs; return new lower max lrg
543
544    // Nuke the live-ness and interference graph and LiveRanGe info
545    {
546      Compile::TracePhase tp("computeLive", &timers[_t_computeLive]);
547      _live = NULL;
548      rm.reset_to_mark();         // Reclaim working storage
549      IndexSet::reset_memory(C, &live_arena);
550      ifg.init(_lrg_map.max_lrg_id());
551
552      // Create LiveRanGe array.
553      // Intersect register masks for all USEs and DEFs
554      gather_lrg_masks(true);
555      live.compute(_lrg_map.max_lrg_id());
556      _live = &live;
557    }
558    must_spill = build_ifg_physical(&live_arena);
559    _ifg->SquareUp();
560    _ifg->Compute_Effective_Degree();
561
562    // Only do conservative coalescing if requested
563    if (OptoCoalesce) {
564      Compile::TracePhase tp("chaitinCoalesce3", &timers[_t_chaitinCoalesce3]);
565      // Conservative (and pessimistic) copy coalescing
566      PhaseConservativeCoalesce coalesce(*this);
567      // Check for few live ranges determines how aggressive coalesce is.
568      coalesce.coalesce_driver();
569    }
570    _lrg_map.compress_uf_map_for_nodes();
571#ifdef ASSERT
572    verify(&live_arena, true);
573#endif
574    cache_lrg_info();           // Count degree of LRGs
575
576    // Simplify the InterFerence Graph by removing LRGs of low degree.
577    // LRGs of low degree are trivially colorable.
578    Simplify();
579
580    // Select colors by re-inserting LRGs back into the IFG in reverse order.
581    // Return whether or not something spills.
582    spills = Select();
583  }
584
585  // Count number of Simplify-Select trips per coloring success.
586  _allocator_attempts += _trip_cnt + 1;
587  _allocator_successes += 1;
588
589  // Peephole remove copies
590  post_allocate_copy_removal();
591
592  // Merge multidefs if multiple defs representing the same value are used in a single block.
593  merge_multidefs();
594
595#ifdef ASSERT
596  // Veify the graph after RA.
597  verify(&live_arena);
598#endif
599
600  // max_reg is past the largest *register* used.
601  // Convert that to a frame_slot number.
602  if (_max_reg <= _matcher._new_SP) {
603    _framesize = C->out_preserve_stack_slots();
604  }
605  else {
606    _framesize = _max_reg -_matcher._new_SP;
607  }
608  assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
609
610  // This frame must preserve the required fp alignment
611  _framesize = align_up(_framesize, Matcher::stack_alignment_in_slots());
612  assert(_framesize <= 1000000, "sanity check");
613#ifndef PRODUCT
614  _total_framesize += _framesize;
615  if ((int)_framesize > _max_framesize) {
616    _max_framesize = _framesize;
617  }
618#endif
619
620  // Convert CISC spills
621  fixup_spills();
622
623  // Log regalloc results
624  CompileLog* log = Compile::current()->log();
625  if (log != NULL) {
626    log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
627  }
628
629  if (C->failing()) {
630    return;
631  }
632
633  NOT_PRODUCT(C->verify_graph_edges();)
634
635  // Move important info out of the live_arena to longer lasting storage.
636  alloc_node_regs(_lrg_map.size());
637  for (uint i=0; i < _lrg_map.size(); i++) {
638    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
639      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
640      if (!lrg.alive()) {
641        set_bad(i);
642      } else if (lrg.num_regs() == 1) {
643        set1(i, lrg.reg());
644      } else {                  // Must be a register-set
645        if (!lrg._fat_proj) {   // Must be aligned adjacent register set
646          // Live ranges record the highest register in their mask.
647          // We want the low register for the AD file writer's convenience.
648          OptoReg::Name hi = lrg.reg(); // Get hi register
649          OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
650          // We have to use pair [lo,lo+1] even for wide vectors because
651          // the rest of code generation works only with pairs. It is safe
652          // since for registers encoding only 'lo' is used.
653          // Second reg from pair is used in ScheduleAndBundle on SPARC where
654          // vector max size is 8 which corresponds to registers pair.
655          // It is also used in BuildOopMaps but oop operations are not
656          // vectorized.
657          set2(i, lo);
658        } else {                // Misaligned; extract 2 bits
659          OptoReg::Name hi = lrg.reg(); // Get hi register
660          lrg.Remove(hi);       // Yank from mask
661          int lo = lrg.mask().find_first_elem(); // Find lo
662          set_pair(i, hi, lo);
663        }
664      }
665      if( lrg._is_oop ) _node_oops.set(i);
666    } else {
667      set_bad(i);
668    }
669  }
670
671  // Done!
672  _live = NULL;
673  _ifg = NULL;
674  C->set_indexSet_arena(NULL);  // ResourceArea is at end of scope
675}
676
677void PhaseChaitin::de_ssa() {
678  // Set initial Names for all Nodes.  Most Nodes get the virtual register
679  // number.  A few get the ZERO live range number.  These do not
680  // get allocated, but instead rely on correct scheduling to ensure that
681  // only one instance is simultaneously live at a time.
682  uint lr_counter = 1;
683  for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
684    Block* block = _cfg.get_block(i);
685    uint cnt = block->number_of_nodes();
686
687    // Handle all the normal Nodes in the block
688    for( uint j = 0; j < cnt; j++ ) {
689      Node *n = block->get_node(j);
690      // Pre-color to the zero live range, or pick virtual register
691      const RegMask &rm = n->out_RegMask();
692      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
693    }
694  }
695
696  // Reset the Union-Find mapping to be identity
697  _lrg_map.reset_uf_map(lr_counter);
698}
699
700void PhaseChaitin::mark_ssa() {
701  // Use ssa names to populate the live range maps or if no mask
702  // is available, use the 0 entry.
703  uint max_idx = 0;
704  for ( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
705    Block* block = _cfg.get_block(i);
706    uint cnt = block->number_of_nodes();
707
708    // Handle all the normal Nodes in the block
709    for ( uint j = 0; j < cnt; j++ ) {
710      Node *n = block->get_node(j);
711      // Pre-color to the zero live range, or pick virtual register
712      const RegMask &rm = n->out_RegMask();
713      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? n->_idx : 0);
714      max_idx = (n->_idx > max_idx) ? n->_idx : max_idx;
715    }
716  }
717  _lrg_map.set_max_lrg_id(max_idx+1);
718
719  // Reset the Union-Find mapping to be identity
720  _lrg_map.reset_uf_map(max_idx+1);
721}
722
723
724// Gather LiveRanGe information, including register masks.  Modification of
725// cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
726void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
727
728  // Nail down the frame pointer live range
729  uint fp_lrg = _lrg_map.live_range_id(_cfg.get_root_node()->in(1)->in(TypeFunc::FramePtr));
730  lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
731
732  // For all blocks
733  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
734    Block* block = _cfg.get_block(i);
735
736    // For all instructions
737    for (uint j = 1; j < block->number_of_nodes(); j++) {
738      Node* n = block->get_node(j);
739      uint input_edge_start =1; // Skip control most nodes
740      bool is_machine_node = false;
741      if (n->is_Mach()) {
742        is_machine_node = true;
743        input_edge_start = n->as_Mach()->oper_input_base();
744      }
745      uint idx = n->is_Copy();
746
747      // Get virtual register number, same as LiveRanGe index
748      uint vreg = _lrg_map.live_range_id(n);
749      LRG& lrg = lrgs(vreg);
750      if (vreg) {              // No vreg means un-allocable (e.g. memory)
751
752        // Collect has-copy bit
753        if (idx) {
754          lrg._has_copy = 1;
755          uint clidx = _lrg_map.live_range_id(n->in(idx));
756          LRG& copy_src = lrgs(clidx);
757          copy_src._has_copy = 1;
758        }
759
760        // Check for float-vs-int live range (used in register-pressure
761        // calculations)
762        const Type *n_type = n->bottom_type();
763        if (n_type->is_floatingpoint()) {
764          lrg._is_float = 1;
765        }
766
767        // Check for twice prior spilling.  Once prior spilling might have
768        // spilled 'soft', 2nd prior spill should have spilled 'hard' and
769        // further spilling is unlikely to make progress.
770        if (_spilled_once.test(n->_idx)) {
771          lrg._was_spilled1 = 1;
772          if (_spilled_twice.test(n->_idx)) {
773            lrg._was_spilled2 = 1;
774          }
775        }
776
777#ifndef PRODUCT
778        if (trace_spilling() && lrg._def != NULL) {
779          // collect defs for MultiDef printing
780          if (lrg._defs == NULL) {
781            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
782            lrg._defs->append(lrg._def);
783          }
784          lrg._defs->append(n);
785        }
786#endif
787
788        // Check for a single def LRG; these can spill nicely
789        // via rematerialization.  Flag as NULL for no def found
790        // yet, or 'n' for single def or -1 for many defs.
791        lrg._def = lrg._def ? NodeSentinel : n;
792
793        // Limit result register mask to acceptable registers
794        const RegMask &rm = n->out_RegMask();
795        lrg.AND( rm );
796
797        uint ireg = n->ideal_reg();
798        assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
799                "oops must be in Op_RegP's" );
800
801        // Check for vector live range (only if vector register is used).
802        // On SPARC vector uses RegD which could be misaligned so it is not
803        // processes as vector in RA.
804        if (RegMask::is_vector(ireg))
805          lrg._is_vector = 1;
806        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD || ireg == Op_RegL,
807               "vector must be in vector registers");
808
809        // Check for bound register masks
810        const RegMask &lrgmask = lrg.mask();
811        if (lrgmask.is_bound(ireg)) {
812          lrg._is_bound = 1;
813        }
814
815        // Check for maximum frequency value
816        if (lrg._maxfreq < block->_freq) {
817          lrg._maxfreq = block->_freq;
818        }
819
820        // Check for oop-iness, or long/double
821        // Check for multi-kill projection
822        switch (ireg) {
823        case MachProjNode::fat_proj:
824          // Fat projections have size equal to number of registers killed
825          lrg.set_num_regs(rm.Size());
826          lrg.set_reg_pressure(lrg.num_regs());
827          lrg._fat_proj = 1;
828          lrg._is_bound = 1;
829          break;
830        case Op_RegP:
831#ifdef _LP64
832          lrg.set_num_regs(2);  // Size is 2 stack words
833#else
834          lrg.set_num_regs(1);  // Size is 1 stack word
835#endif
836          // Register pressure is tracked relative to the maximum values
837          // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
838          // and relative to other types which compete for the same regs.
839          //
840          // The following table contains suggested values based on the
841          // architectures as defined in each .ad file.
842          // INTPRESSURE and FLOATPRESSURE may be tuned differently for
843          // compile-speed or performance.
844          // Note1:
845          // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
846          // since .ad registers are defined as high and low halves.
847          // These reg_pressure values remain compatible with the code
848          // in is_high_pressure() which relates get_invalid_mask_size(),
849          // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
850          // Note2:
851          // SPARC -d32 has 24 registers available for integral values,
852          // but only 10 of these are safe for 64-bit longs.
853          // Using set_reg_pressure(2) for both int and long means
854          // the allocator will believe it can fit 26 longs into
855          // registers.  Using 2 for longs and 1 for ints means the
856          // allocator will attempt to put 52 integers into registers.
857          // The settings below limit this problem to methods with
858          // many long values which are being run on 32-bit SPARC.
859          //
860          // ------------------- reg_pressure --------------------
861          // Each entry is reg_pressure_per_value,number_of_regs
862          //         RegL  RegI  RegFlags   RegF RegD    INTPRESSURE  FLOATPRESSURE
863          // IA32     2     1     1          1    1          6           6
864          // IA64     1     1     1          1    1         50          41
865          // SPARC    2     2     2          2    2         48 (24)     52 (26)
866          // SPARCV9  2     2     2          2    2         48 (24)     52 (26)
867          // AMD64    1     1     1          1    1         14          15
868          // -----------------------------------------------------
869#if defined(SPARC)
870          lrg.set_reg_pressure(2);  // use for v9 as well
871#else
872          lrg.set_reg_pressure(1);  // normally one value per register
873#endif
874          if( n_type->isa_oop_ptr() ) {
875            lrg._is_oop = 1;
876          }
877          break;
878        case Op_RegL:           // Check for long or double
879        case Op_RegD:
880          lrg.set_num_regs(2);
881          // Define platform specific register pressure
882#if defined(SPARC) || defined(ARM32)
883          lrg.set_reg_pressure(2);
884#elif defined(IA32)
885          if( ireg == Op_RegL ) {
886            lrg.set_reg_pressure(2);
887          } else {
888            lrg.set_reg_pressure(1);
889          }
890#else
891          lrg.set_reg_pressure(1);  // normally one value per register
892#endif
893          // If this def of a double forces a mis-aligned double,
894          // flag as '_fat_proj' - really flag as allowing misalignment
895          // AND changes how we count interferences.  A mis-aligned
896          // double can interfere with TWO aligned pairs, or effectively
897          // FOUR registers!
898          if (rm.is_misaligned_pair()) {
899            lrg._fat_proj = 1;
900            lrg._is_bound = 1;
901          }
902          break;
903        case Op_RegF:
904        case Op_RegI:
905        case Op_RegN:
906        case Op_RegFlags:
907        case 0:                 // not an ideal register
908          lrg.set_num_regs(1);
909#ifdef SPARC
910          lrg.set_reg_pressure(2);
911#else
912          lrg.set_reg_pressure(1);
913#endif
914          break;
915        case Op_VecS:
916          assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
917          assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
918          lrg.set_num_regs(RegMask::SlotsPerVecS);
919          lrg.set_reg_pressure(1);
920          break;
921        case Op_VecD:
922          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
923          assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
924          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
925          lrg.set_num_regs(RegMask::SlotsPerVecD);
926          lrg.set_reg_pressure(1);
927          break;
928        case Op_VecX:
929          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
930          assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
931          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
932          lrg.set_num_regs(RegMask::SlotsPerVecX);
933          lrg.set_reg_pressure(1);
934          break;
935        case Op_VecY:
936          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
937          assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
938          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
939          lrg.set_num_regs(RegMask::SlotsPerVecY);
940          lrg.set_reg_pressure(1);
941          break;
942        case Op_VecZ:
943          assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecZ), "sanity");
944          assert(RegMask::num_registers(Op_VecZ) == RegMask::SlotsPerVecZ, "sanity");
945          assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecZ), "vector should be aligned");
946          lrg.set_num_regs(RegMask::SlotsPerVecZ);
947          lrg.set_reg_pressure(1);
948          break;
949        default:
950          ShouldNotReachHere();
951        }
952      }
953
954      // Now do the same for inputs
955      uint cnt = n->req();
956      // Setup for CISC SPILLING
957      uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
958      if( UseCISCSpill && after_aggressive ) {
959        inp = n->cisc_operand();
960        if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
961          // Convert operand number to edge index number
962          inp = n->as_Mach()->operand_index(inp);
963      }
964
965      // Prepare register mask for each input
966      for( uint k = input_edge_start; k < cnt; k++ ) {
967        uint vreg = _lrg_map.live_range_id(n->in(k));
968        if (!vreg) {
969          continue;
970        }
971
972        // If this instruction is CISC Spillable, add the flags
973        // bit to its appropriate input
974        if( UseCISCSpill && after_aggressive && inp == k ) {
975#ifndef PRODUCT
976          if( TraceCISCSpill ) {
977            tty->print("  use_cisc_RegMask: ");
978            n->dump();
979          }
980#endif
981          n->as_Mach()->use_cisc_RegMask();
982        }
983
984        if (is_machine_node && _scheduling_info_generated) {
985          MachNode* cur_node = n->as_Mach();
986          // this is cleaned up by register allocation
987          if (k >= cur_node->num_opnds()) continue;
988        }
989
990        LRG &lrg = lrgs(vreg);
991        // // Testing for floating point code shape
992        // Node *test = n->in(k);
993        // if( test->is_Mach() ) {
994        //   MachNode *m = test->as_Mach();
995        //   int  op = m->ideal_Opcode();
996        //   if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
997        //     int zzz = 1;
998        //   }
999        // }
1000
1001        // Limit result register mask to acceptable registers.
1002        // Do not limit registers from uncommon uses before
1003        // AggressiveCoalesce.  This effectively pre-virtual-splits
1004        // around uncommon uses of common defs.
1005        const RegMask &rm = n->in_RegMask(k);
1006        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * block->_freq) {
1007          // Since we are BEFORE aggressive coalesce, leave the register
1008          // mask untrimmed by the call.  This encourages more coalescing.
1009          // Later, AFTER aggressive, this live range will have to spill
1010          // but the spiller handles slow-path calls very nicely.
1011        } else {
1012          lrg.AND( rm );
1013        }
1014
1015        // Check for bound register masks
1016        const RegMask &lrgmask = lrg.mask();
1017        uint kreg = n->in(k)->ideal_reg();
1018        bool is_vect = RegMask::is_vector(kreg);
1019        assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
1020               is_vect || kreg == Op_RegD || kreg == Op_RegL,
1021               "vector must be in vector registers");
1022        if (lrgmask.is_bound(kreg))
1023          lrg._is_bound = 1;
1024
1025        // If this use of a double forces a mis-aligned double,
1026        // flag as '_fat_proj' - really flag as allowing misalignment
1027        // AND changes how we count interferences.  A mis-aligned
1028        // double can interfere with TWO aligned pairs, or effectively
1029        // FOUR registers!
1030#ifdef ASSERT
1031        if (is_vect && !_scheduling_info_generated) {
1032          if (lrg.num_regs() != 0) {
1033            assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
1034            assert(!lrg._fat_proj, "sanity");
1035            assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
1036          } else {
1037            assert(n->is_Phi(), "not all inputs processed only if Phi");
1038          }
1039        }
1040#endif
1041        if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
1042          lrg._fat_proj = 1;
1043          lrg._is_bound = 1;
1044        }
1045        // if the LRG is an unaligned pair, we will have to spill
1046        // so clear the LRG's register mask if it is not already spilled
1047        if (!is_vect && !n->is_SpillCopy() &&
1048            (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
1049            lrgmask.is_misaligned_pair()) {
1050          lrg.Clear();
1051        }
1052
1053        // Check for maximum frequency value
1054        if (lrg._maxfreq < block->_freq) {
1055          lrg._maxfreq = block->_freq;
1056        }
1057
1058      } // End for all allocated inputs
1059    } // end for all instructions
1060  } // end for all blocks
1061
1062  // Final per-liverange setup
1063  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
1064    LRG &lrg = lrgs(i2);
1065    assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
1066    if (lrg.num_regs() > 1 && !lrg._fat_proj) {
1067      lrg.clear_to_sets();
1068    }
1069    lrg.compute_set_mask_size();
1070    if (lrg.not_free()) {      // Handle case where we lose from the start
1071      lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
1072      lrg._direct_conflict = 1;
1073    }
1074    lrg.set_degree(0);          // no neighbors in IFG yet
1075  }
1076}
1077
1078// Set the was-lo-degree bit.  Conservative coalescing should not change the
1079// colorability of the graph.  If any live range was of low-degree before
1080// coalescing, it should Simplify.  This call sets the was-lo-degree bit.
1081// The bit is checked in Simplify.
1082void PhaseChaitin::set_was_low() {
1083#ifdef ASSERT
1084  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1085    int size = lrgs(i).num_regs();
1086    uint old_was_lo = lrgs(i)._was_lo;
1087    lrgs(i)._was_lo = 0;
1088    if( lrgs(i).lo_degree() ) {
1089      lrgs(i)._was_lo = 1;      // Trivially of low degree
1090    } else {                    // Else check the Brigg's assertion
1091      // Brigg's observation is that the lo-degree neighbors of a
1092      // hi-degree live range will not interfere with the color choices
1093      // of said hi-degree live range.  The Simplify reverse-stack-coloring
1094      // order takes care of the details.  Hence you do not have to count
1095      // low-degree neighbors when determining if this guy colors.
1096      int briggs_degree = 0;
1097      IndexSet *s = _ifg->neighbors(i);
1098      IndexSetIterator elements(s);
1099      uint lidx;
1100      while((lidx = elements.next()) != 0) {
1101        if( !lrgs(lidx).lo_degree() )
1102          briggs_degree += MAX2(size,lrgs(lidx).num_regs());
1103      }
1104      if( briggs_degree < lrgs(i).degrees_of_freedom() )
1105        lrgs(i)._was_lo = 1;    // Low degree via the briggs assertion
1106    }
1107    assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
1108  }
1109#endif
1110}
1111
1112#define REGISTER_CONSTRAINED 16
1113
1114// Compute cost/area ratio, in case we spill.  Build the lo-degree list.
1115void PhaseChaitin::cache_lrg_info( ) {
1116  Compile::TracePhase tp("chaitinCacheLRG", &timers[_t_chaitinCacheLRG]);
1117
1118  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1119    LRG &lrg = lrgs(i);
1120
1121    // Check for being of low degree: means we can be trivially colored.
1122    // Low degree, dead or must-spill guys just get to simplify right away
1123    if( lrg.lo_degree() ||
1124       !lrg.alive() ||
1125        lrg._must_spill ) {
1126      // Split low degree list into those guys that must get a
1127      // register and those that can go to register or stack.
1128      // The idea is LRGs that can go register or stack color first when
1129      // they have a good chance of getting a register.  The register-only
1130      // lo-degree live ranges always get a register.
1131      OptoReg::Name hi_reg = lrg.mask().find_last_elem();
1132      if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
1133        lrg._next = _lo_stk_degree;
1134        _lo_stk_degree = i;
1135      } else {
1136        lrg._next = _lo_degree;
1137        _lo_degree = i;
1138      }
1139    } else {                    // Else high degree
1140      lrgs(_hi_degree)._prev = i;
1141      lrg._next = _hi_degree;
1142      lrg._prev = 0;
1143      _hi_degree = i;
1144    }
1145  }
1146}
1147
1148// Simplify the IFG by removing LRGs of low degree that have NO copies
1149void PhaseChaitin::Pre_Simplify( ) {
1150
1151  // Warm up the lo-degree no-copy list
1152  int lo_no_copy = 0;
1153  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1154    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
1155        !lrgs(i).alive() ||
1156        lrgs(i)._must_spill) {
1157      lrgs(i)._next = lo_no_copy;
1158      lo_no_copy = i;
1159    }
1160  }
1161
1162  while( lo_no_copy ) {
1163    uint lo = lo_no_copy;
1164    lo_no_copy = lrgs(lo)._next;
1165    int size = lrgs(lo).num_regs();
1166
1167    // Put the simplified guy on the simplified list.
1168    lrgs(lo)._next = _simplified;
1169    _simplified = lo;
1170
1171    // Yank this guy from the IFG.
1172    IndexSet *adj = _ifg->remove_node( lo );
1173
1174    // If any neighbors' degrees fall below their number of
1175    // allowed registers, then put that neighbor on the low degree
1176    // list.  Note that 'degree' can only fall and 'numregs' is
1177    // unchanged by this action.  Thus the two are equal at most once,
1178    // so LRGs hit the lo-degree worklists at most once.
1179    IndexSetIterator elements(adj);
1180    uint neighbor;
1181    while ((neighbor = elements.next()) != 0) {
1182      LRG *n = &lrgs(neighbor);
1183      assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1184
1185      // Check for just becoming of-low-degree
1186      if( n->just_lo_degree() && !n->_has_copy ) {
1187        assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1188        // Put on lo-degree list
1189        n->_next = lo_no_copy;
1190        lo_no_copy = neighbor;
1191      }
1192    }
1193  } // End of while lo-degree no_copy worklist not empty
1194
1195  // No more lo-degree no-copy live ranges to simplify
1196}
1197
1198// Simplify the IFG by removing LRGs of low degree.
1199void PhaseChaitin::Simplify( ) {
1200  Compile::TracePhase tp("chaitinSimplify", &timers[_t_chaitinSimplify]);
1201
1202  while( 1 ) {                  // Repeat till simplified it all
1203    // May want to explore simplifying lo_degree before _lo_stk_degree.
1204    // This might result in more spills coloring into registers during
1205    // Select().
1206    while( _lo_degree || _lo_stk_degree ) {
1207      // If possible, pull from lo_stk first
1208      uint lo;
1209      if( _lo_degree ) {
1210        lo = _lo_degree;
1211        _lo_degree = lrgs(lo)._next;
1212      } else {
1213        lo = _lo_stk_degree;
1214        _lo_stk_degree = lrgs(lo)._next;
1215      }
1216
1217      // Put the simplified guy on the simplified list.
1218      lrgs(lo)._next = _simplified;
1219      _simplified = lo;
1220      // If this guy is "at risk" then mark his current neighbors
1221      if( lrgs(lo)._at_risk ) {
1222        IndexSetIterator elements(_ifg->neighbors(lo));
1223        uint datum;
1224        while ((datum = elements.next()) != 0) {
1225          lrgs(datum)._risk_bias = lo;
1226        }
1227      }
1228
1229      // Yank this guy from the IFG.
1230      IndexSet *adj = _ifg->remove_node( lo );
1231
1232      // If any neighbors' degrees fall below their number of
1233      // allowed registers, then put that neighbor on the low degree
1234      // list.  Note that 'degree' can only fall and 'numregs' is
1235      // unchanged by this action.  Thus the two are equal at most once,
1236      // so LRGs hit the lo-degree worklist at most once.
1237      IndexSetIterator elements(adj);
1238      uint neighbor;
1239      while ((neighbor = elements.next()) != 0) {
1240        LRG *n = &lrgs(neighbor);
1241#ifdef ASSERT
1242        if( VerifyOpto || VerifyRegisterAllocator ) {
1243          assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1244        }
1245#endif
1246
1247        // Check for just becoming of-low-degree just counting registers.
1248        // _must_spill live ranges are already on the low degree list.
1249        if( n->just_lo_degree() && !n->_must_spill ) {
1250          assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1251          // Pull from hi-degree list
1252          uint prev = n->_prev;
1253          uint next = n->_next;
1254          if( prev ) lrgs(prev)._next = next;
1255          else _hi_degree = next;
1256          lrgs(next)._prev = prev;
1257          n->_next = _lo_degree;
1258          _lo_degree = neighbor;
1259        }
1260      }
1261    } // End of while lo-degree/lo_stk_degree worklist not empty
1262
1263    // Check for got everything: is hi-degree list empty?
1264    if( !_hi_degree ) break;
1265
1266    // Time to pick a potential spill guy
1267    uint lo_score = _hi_degree;
1268    double score = lrgs(lo_score).score();
1269    double area = lrgs(lo_score)._area;
1270    double cost = lrgs(lo_score)._cost;
1271    bool bound = lrgs(lo_score)._is_bound;
1272
1273    // Find cheapest guy
1274    debug_only( int lo_no_simplify=0; );
1275    for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
1276      assert( !(*_ifg->_yanked)[i], "" );
1277      // It's just vaguely possible to move hi-degree to lo-degree without
1278      // going through a just-lo-degree stage: If you remove a double from
1279      // a float live range it's degree will drop by 2 and you can skip the
1280      // just-lo-degree stage.  It's very rare (shows up after 5000+ methods
1281      // in -Xcomp of Java2Demo).  So just choose this guy to simplify next.
1282      if( lrgs(i).lo_degree() ) {
1283        lo_score = i;
1284        break;
1285      }
1286      debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
1287      double iscore = lrgs(i).score();
1288      double iarea = lrgs(i)._area;
1289      double icost = lrgs(i)._cost;
1290      bool ibound = lrgs(i)._is_bound;
1291
1292      // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
1293      // wins.  Ties happen because all live ranges in question have spilled
1294      // a few times before and the spill-score adds a huge number which
1295      // washes out the low order bits.  We are choosing the lesser of 2
1296      // evils; in this case pick largest area to spill.
1297      // Ties also happen when live ranges are defined and used only inside
1298      // one block. In which case their area is 0 and score set to max.
1299      // In such case choose bound live range over unbound to free registers
1300      // or with smaller cost to spill.
1301      if( iscore < score ||
1302          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
1303          (iscore == score && iarea == area &&
1304           ( (ibound && !bound) || (ibound == bound && (icost < cost)) )) ) {
1305        lo_score = i;
1306        score = iscore;
1307        area = iarea;
1308        cost = icost;
1309        bound = ibound;
1310      }
1311    }
1312    LRG *lo_lrg = &lrgs(lo_score);
1313    // The live range we choose for spilling is either hi-degree, or very
1314    // rarely it can be low-degree.  If we choose a hi-degree live range
1315    // there better not be any lo-degree choices.
1316    assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );
1317
1318    // Pull from hi-degree list
1319    uint prev = lo_lrg->_prev;
1320    uint next = lo_lrg->_next;
1321    if( prev ) lrgs(prev)._next = next;
1322    else _hi_degree = next;
1323    lrgs(next)._prev = prev;
1324    // Jam him on the lo-degree list, despite his high degree.
1325    // Maybe he'll get a color, and maybe he'll spill.
1326    // Only Select() will know.
1327    lrgs(lo_score)._at_risk = true;
1328    _lo_degree = lo_score;
1329    lo_lrg->_next = 0;
1330
1331  } // End of while not simplified everything
1332
1333}
1334
1335// Is 'reg' register legal for 'lrg'?
1336static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
1337  if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
1338      lrg.mask().Member(OptoReg::add(reg,-chunk))) {
1339    // RA uses OptoReg which represent the highest element of a registers set.
1340    // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
1341    // in which XMMd is used by RA to represent such vectors. A double value
1342    // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
1343    // The register mask uses largest bits set of overlapping register sets.
1344    // On x86 with AVX it uses 8 bits for each XMM registers set.
1345    //
1346    // The 'lrg' already has cleared-to-set register mask (done in Select()
1347    // before calling choose_color()). Passing mask.Member(reg) check above
1348    // indicates that the size (num_regs) of 'reg' set is less or equal to
1349    // 'lrg' set size.
1350    // For set size 1 any register which is member of 'lrg' mask is legal.
1351    if (lrg.num_regs()==1)
1352      return true;
1353    // For larger sets only an aligned register with the same set size is legal.
1354    int mask = lrg.num_regs()-1;
1355    if ((reg&mask) == mask)
1356      return true;
1357  }
1358  return false;
1359}
1360
1361// Choose a color using the biasing heuristic
1362OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
1363
1364  // Check for "at_risk" LRG's
1365  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
1366  if( risk_lrg != 0 ) {
1367    // Walk the colored neighbors of the "at_risk" candidate
1368    // Choose a color which is both legal and already taken by a neighbor
1369    // of the "at_risk" candidate in order to improve the chances of the
1370    // "at_risk" candidate of coloring
1371    IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1372    uint datum;
1373    while ((datum = elements.next()) != 0) {
1374      OptoReg::Name reg = lrgs(datum).reg();
1375      // If this LRG's register is legal for us, choose it
1376      if (is_legal_reg(lrg, reg, chunk))
1377        return reg;
1378    }
1379  }
1380
1381  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
1382  if( copy_lrg != 0 ) {
1383    // If he has a color,
1384    if( !(*(_ifg->_yanked))[copy_lrg] ) {
1385      OptoReg::Name reg = lrgs(copy_lrg).reg();
1386      //  And it is legal for you,
1387      if (is_legal_reg(lrg, reg, chunk))
1388        return reg;
1389    } else if( chunk == 0 ) {
1390      // Choose a color which is legal for him
1391      RegMask tempmask = lrg.mask();
1392      tempmask.AND(lrgs(copy_lrg).mask());
1393      tempmask.clear_to_sets(lrg.num_regs());
1394      OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1395      if (OptoReg::is_valid(reg))
1396        return reg;
1397    }
1398  }
1399
1400  // If no bias info exists, just go with the register selection ordering
1401  if (lrg._is_vector || lrg.num_regs() == 2) {
1402    // Find an aligned set
1403    return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1404  }
1405
1406  // CNC - Fun hack.  Alternate 1st and 2nd selection.  Enables post-allocate
1407  // copy removal to remove many more copies, by preventing a just-assigned
1408  // register from being repeatedly assigned.
1409  OptoReg::Name reg = lrg.mask().find_first_elem();
1410  if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1411    // This 'Remove; find; Insert' idiom is an expensive way to find the
1412    // SECOND element in the mask.
1413    lrg.Remove(reg);
1414    OptoReg::Name reg2 = lrg.mask().find_first_elem();
1415    lrg.Insert(reg);
1416    if( OptoReg::is_reg(reg2))
1417      reg = reg2;
1418  }
1419  return OptoReg::add( reg, chunk );
1420}
1421
1422// Choose a color in the current chunk
1423OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1424  assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1425  assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1426
1427  if( lrg.num_regs() == 1 ||    // Common Case
1428      !lrg._fat_proj )          // Aligned+adjacent pairs ok
1429    // Use a heuristic to "bias" the color choice
1430    return bias_color(lrg, chunk);
1431
1432  assert(!lrg._is_vector, "should be not vector here" );
1433  assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1434
1435  // Fat-proj case or misaligned double argument.
1436  assert(lrg.compute_mask_size() == lrg.num_regs() ||
1437         lrg.num_regs() == 2,"fat projs exactly color" );
1438  assert( !chunk, "always color in 1st chunk" );
1439  // Return the highest element in the set.
1440  return lrg.mask().find_last_elem();
1441}
1442
1443// Select colors by re-inserting LRGs back into the IFG.  LRGs are re-inserted
1444// in reverse order of removal.  As long as nothing of hi-degree was yanked,
1445// everything going back is guaranteed a color.  Select that color.  If some
1446// hi-degree LRG cannot get a color then we record that we must spill.
1447uint PhaseChaitin::Select( ) {
1448  Compile::TracePhase tp("chaitinSelect", &timers[_t_chaitinSelect]);
1449
1450  uint spill_reg = LRG::SPILL_REG;
1451  _max_reg = OptoReg::Name(0);  // Past max register used
1452  while( _simplified ) {
1453    // Pull next LRG from the simplified list - in reverse order of removal
1454    uint lidx = _simplified;
1455    LRG *lrg = &lrgs(lidx);
1456    _simplified = lrg->_next;
1457
1458
1459#ifndef PRODUCT
1460    if (trace_spilling()) {
1461      ttyLocker ttyl;
1462      tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1463                    lrg->degrees_of_freedom());
1464      lrg->dump();
1465    }
1466#endif
1467
1468    // Re-insert into the IFG
1469    _ifg->re_insert(lidx);
1470    if( !lrg->alive() ) continue;
1471    // capture allstackedness flag before mask is hacked
1472    const int is_allstack = lrg->mask().is_AllStack();
1473
1474    // Yeah, yeah, yeah, I know, I know.  I can refactor this
1475    // to avoid the GOTO, although the refactored code will not
1476    // be much clearer.  We arrive here IFF we have a stack-based
1477    // live range that cannot color in the current chunk, and it
1478    // has to move into the next free stack chunk.
1479    int chunk = 0;              // Current chunk is first chunk
1480    retry_next_chunk:
1481
1482    // Remove neighbor colors
1483    IndexSet *s = _ifg->neighbors(lidx);
1484
1485    debug_only(RegMask orig_mask = lrg->mask();)
1486    IndexSetIterator elements(s);
1487    uint neighbor;
1488    while ((neighbor = elements.next()) != 0) {
1489      // Note that neighbor might be a spill_reg.  In this case, exclusion
1490      // of its color will be a no-op, since the spill_reg chunk is in outer
1491      // space.  Also, if neighbor is in a different chunk, this exclusion
1492      // will be a no-op.  (Later on, if lrg runs out of possible colors in
1493      // its chunk, a new chunk of color may be tried, in which case
1494      // examination of neighbors is started again, at retry_next_chunk.)
1495      LRG &nlrg = lrgs(neighbor);
1496      OptoReg::Name nreg = nlrg.reg();
1497      // Only subtract masks in the same chunk
1498      if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1499#ifndef PRODUCT
1500        uint size = lrg->mask().Size();
1501        RegMask rm = lrg->mask();
1502#endif
1503        lrg->SUBTRACT(nlrg.mask());
1504#ifndef PRODUCT
1505        if (trace_spilling() && lrg->mask().Size() != size) {
1506          ttyLocker ttyl;
1507          tty->print("L%d ", lidx);
1508          rm.dump();
1509          tty->print(" intersected L%d ", neighbor);
1510          nlrg.mask().dump();
1511          tty->print(" removed ");
1512          rm.SUBTRACT(lrg->mask());
1513          rm.dump();
1514          tty->print(" leaving ");
1515          lrg->mask().dump();
1516          tty->cr();
1517        }
1518#endif
1519      }
1520    }
1521    //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1522    // Aligned pairs need aligned masks
1523    assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1524    if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1525      lrg->clear_to_sets();
1526    }
1527
1528    // Check if a color is available and if so pick the color
1529    OptoReg::Name reg = choose_color( *lrg, chunk );
1530#ifdef SPARC
1531    debug_only(lrg->compute_set_mask_size());
1532    assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1533#endif
1534
1535    //---------------
1536    // If we fail to color and the AllStack flag is set, trigger
1537    // a chunk-rollover event
1538    if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1539      // Bump register mask up to next stack chunk
1540      chunk += RegMask::CHUNK_SIZE;
1541      lrg->Set_All();
1542
1543      goto retry_next_chunk;
1544    }
1545
1546    //---------------
1547    // Did we get a color?
1548    else if( OptoReg::is_valid(reg)) {
1549#ifndef PRODUCT
1550      RegMask avail_rm = lrg->mask();
1551#endif
1552
1553      // Record selected register
1554      lrg->set_reg(reg);
1555
1556      if( reg >= _max_reg )     // Compute max register limit
1557        _max_reg = OptoReg::add(reg,1);
1558      // Fold reg back into normal space
1559      reg = OptoReg::add(reg,-chunk);
1560
1561      // If the live range is not bound, then we actually had some choices
1562      // to make.  In this case, the mask has more bits in it than the colors
1563      // chosen.  Restrict the mask to just what was picked.
1564      int n_regs = lrg->num_regs();
1565      assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1566      if (n_regs == 1 || !lrg->_fat_proj) {
1567        assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecZ, "sanity");
1568        lrg->Clear();           // Clear the mask
1569        lrg->Insert(reg);       // Set regmask to match selected reg
1570        // For vectors and pairs, also insert the low bit of the pair
1571        for (int i = 1; i < n_regs; i++)
1572          lrg->Insert(OptoReg::add(reg,-i));
1573        lrg->set_mask_size(n_regs);
1574      } else {                  // Else fatproj
1575        // mask must be equal to fatproj bits, by definition
1576      }
1577#ifndef PRODUCT
1578      if (trace_spilling()) {
1579        ttyLocker ttyl;
1580        tty->print("L%d selected ", lidx);
1581        lrg->mask().dump();
1582        tty->print(" from ");
1583        avail_rm.dump();
1584        tty->cr();
1585      }
1586#endif
1587      // Note that reg is the highest-numbered register in the newly-bound mask.
1588    } // end color available case
1589
1590    //---------------
1591    // Live range is live and no colors available
1592    else {
1593      assert( lrg->alive(), "" );
1594      assert( !lrg->_fat_proj || lrg->is_multidef() ||
1595              lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1596      assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1597
1598      // Assign the special spillreg register
1599      lrg->set_reg(OptoReg::Name(spill_reg++));
1600      // Do not empty the regmask; leave mask_size lying around
1601      // for use during Spilling
1602#ifndef PRODUCT
1603      if( trace_spilling() ) {
1604        ttyLocker ttyl;
1605        tty->print("L%d spilling with neighbors: ", lidx);
1606        s->dump();
1607        debug_only(tty->print(" original mask: "));
1608        debug_only(orig_mask.dump());
1609        dump_lrg(lidx);
1610      }
1611#endif
1612    } // end spill case
1613
1614  }
1615
1616  return spill_reg-LRG::SPILL_REG;      // Return number of spills
1617}
1618
1619// Copy 'was_spilled'-edness from the source Node to the dst Node.
1620void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1621  if( _spilled_once.test(src->_idx) ) {
1622    _spilled_once.set(dst->_idx);
1623    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
1624    if( _spilled_twice.test(src->_idx) ) {
1625      _spilled_twice.set(dst->_idx);
1626      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
1627    }
1628  }
1629}
1630
1631// Set the 'spilled_once' or 'spilled_twice' flag on a node.
1632void PhaseChaitin::set_was_spilled( Node *n ) {
1633  if( _spilled_once.test_set(n->_idx) )
1634    _spilled_twice.set(n->_idx);
1635}
1636
1637// Convert Ideal spill instructions into proper FramePtr + offset Loads and
1638// Stores.  Use-def chains are NOT preserved, but Node->LRG->reg maps are.
1639void PhaseChaitin::fixup_spills() {
1640  // This function does only cisc spill work.
1641  if( !UseCISCSpill ) return;
1642
1643  Compile::TracePhase tp("fixupSpills", &timers[_t_fixupSpills]);
1644
1645  // Grab the Frame Pointer
1646  Node *fp = _cfg.get_root_block()->head()->in(1)->in(TypeFunc::FramePtr);
1647
1648  // For all blocks
1649  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1650    Block* block = _cfg.get_block(i);
1651
1652    // For all instructions in block
1653    uint last_inst = block->end_idx();
1654    for (uint j = 1; j <= last_inst; j++) {
1655      Node* n = block->get_node(j);
1656
1657      // Dead instruction???
1658      assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1659              C->top() == n ||  // Or the random TOP node
1660              n->is_Proj(),     // Or a fat-proj kill node
1661              "No dead instructions after post-alloc" );
1662
1663      int inp = n->cisc_operand();
1664      if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1665        // Convert operand number to edge index number
1666        MachNode *mach = n->as_Mach();
1667        inp = mach->operand_index(inp);
1668        Node *src = n->in(inp);   // Value to load or store
1669        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
1670        OptoReg::Name src_reg = lrg_cisc.reg();
1671        // Doubles record the HIGH register of an adjacent pair.
1672        src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
1673        if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1674          // This is a CISC Spill, get stack offset and construct new node
1675#ifndef PRODUCT
1676          if( TraceCISCSpill ) {
1677            tty->print("    reg-instr:  ");
1678            n->dump();
1679          }
1680#endif
1681          int stk_offset = reg2offset(src_reg);
1682          // Bailout if we might exceed node limit when spilling this instruction
1683          C->check_node_count(0, "out of nodes fixing spills");
1684          if (C->failing())  return;
1685          // Transform node
1686          MachNode *cisc = mach->cisc_version(stk_offset)->as_Mach();
1687          cisc->set_req(inp,fp);          // Base register is frame pointer
1688          if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1689            assert( cisc->oper_input_base() == 2, "Only adding one edge");
1690            cisc->ins_req(1,src);         // Requires a memory edge
1691          }
1692          block->map_node(cisc, j);          // Insert into basic block
1693          n->subsume_by(cisc, C); // Correct graph
1694          //
1695          ++_used_cisc_instructions;
1696#ifndef PRODUCT
1697          if( TraceCISCSpill ) {
1698            tty->print("    cisc-instr: ");
1699            cisc->dump();
1700          }
1701#endif
1702        } else {
1703#ifndef PRODUCT
1704          if( TraceCISCSpill ) {
1705            tty->print("    using reg-instr: ");
1706            n->dump();
1707          }
1708#endif
1709          ++_unused_cisc_instructions;    // input can be on stack
1710        }
1711      }
1712
1713    } // End of for all instructions
1714
1715  } // End of for all blocks
1716}
1717
1718// Helper to stretch above; recursively discover the base Node for a
1719// given derived Node.  Easy for AddP-related machine nodes, but needs
1720// to be recursive for derived Phis.
1721Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1722  // See if already computed; if so return it
1723  if( derived_base_map[derived->_idx] )
1724    return derived_base_map[derived->_idx];
1725
1726  // See if this happens to be a base.
1727  // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1728  // pointers derived from NULL!  These are always along paths that
1729  // can't happen at run-time but the optimizer cannot deduce it so
1730  // we have to handle it gracefully.
1731  assert(!derived->bottom_type()->isa_narrowoop() ||
1732          derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1733  const TypePtr *tj = derived->bottom_type()->isa_ptr();
1734  // If its an OOP with a non-zero offset, then it is derived.
1735  if( tj == NULL || tj->_offset == 0 ) {
1736    derived_base_map[derived->_idx] = derived;
1737    return derived;
1738  }
1739  // Derived is NULL+offset?  Base is NULL!
1740  if( derived->is_Con() ) {
1741    Node *base = _matcher.mach_null();
1742    assert(base != NULL, "sanity");
1743    if (base->in(0) == NULL) {
1744      // Initialize it once and make it shared:
1745      // set control to _root and place it into Start block
1746      // (where top() node is placed).
1747      base->init_req(0, _cfg.get_root_node());
1748      Block *startb = _cfg.get_block_for_node(C->top());
1749      uint node_pos = startb->find_node(C->top());
1750      startb->insert_node(base, node_pos);
1751      _cfg.map_node_to_block(base, startb);
1752      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1753
1754      // The loadConP0 might have projection nodes depending on architecture
1755      // Add the projection nodes to the CFG
1756      for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
1757        Node* use = base->fast_out(i);
1758        if (use->is_MachProj()) {
1759          startb->insert_node(use, ++node_pos);
1760          _cfg.map_node_to_block(use, startb);
1761          new_lrg(use, maxlrg++);
1762        }
1763      }
1764    }
1765    if (_lrg_map.live_range_id(base) == 0) {
1766      new_lrg(base, maxlrg++);
1767    }
1768    assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
1769    derived_base_map[derived->_idx] = base;
1770    return base;
1771  }
1772
1773  // Check for AddP-related opcodes
1774  if (!derived->is_Phi()) {
1775    assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, "but is: %s", derived->Name());
1776    Node *base = derived->in(AddPNode::Base);
1777    derived_base_map[derived->_idx] = base;
1778    return base;
1779  }
1780
1781  // Recursively find bases for Phis.
1782  // First check to see if we can avoid a base Phi here.
1783  Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1784  uint i;
1785  for( i = 2; i < derived->req(); i++ )
1786    if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1787      break;
1788  // Went to the end without finding any different bases?
1789  if( i == derived->req() ) {   // No need for a base Phi here
1790    derived_base_map[derived->_idx] = base;
1791    return base;
1792  }
1793
1794  // Now we see we need a base-Phi here to merge the bases
1795  const Type *t = base->bottom_type();
1796  base = new PhiNode( derived->in(0), t );
1797  for( i = 1; i < derived->req(); i++ ) {
1798    base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1799    t = t->meet(base->in(i)->bottom_type());
1800  }
1801  base->as_Phi()->set_type(t);
1802
1803  // Search the current block for an existing base-Phi
1804  Block *b = _cfg.get_block_for_node(derived);
1805  for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1806    Node *phi = b->get_node(i);
1807    if( !phi->is_Phi() ) {      // Found end of Phis with no match?
1808      b->insert_node(base,  i); // Must insert created Phi here as base
1809      _cfg.map_node_to_block(base, b);
1810      new_lrg(base,maxlrg++);
1811      break;
1812    }
1813    // See if Phi matches.
1814    uint j;
1815    for( j = 1; j < base->req(); j++ )
1816      if( phi->in(j) != base->in(j) &&
1817          !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1818        break;
1819    if( j == base->req() ) {    // All inputs match?
1820      base = phi;               // Then use existing 'phi' and drop 'base'
1821      break;
1822    }
1823  }
1824
1825
1826  // Cache info for later passes
1827  derived_base_map[derived->_idx] = base;
1828  return base;
1829}
1830
1831// At each Safepoint, insert extra debug edges for each pair of derived value/
1832// base pointer that is live across the Safepoint for oopmap building.  The
1833// edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
1834// required edge set.
1835bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
1836  int must_recompute_live = false;
1837  uint maxlrg = _lrg_map.max_lrg_id();
1838  Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1839  memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1840
1841  // For all blocks in RPO do...
1842  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
1843    Block* block = _cfg.get_block(i);
1844    // Note use of deep-copy constructor.  I cannot hammer the original
1845    // liveout bits, because they are needed by the following coalesce pass.
1846    IndexSet liveout(_live->live(block));
1847
1848    for (uint j = block->end_idx() + 1; j > 1; j--) {
1849      Node* n = block->get_node(j - 1);
1850
1851      // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
1852      // like to see in the same register.  Compare uses the loop-phi and so
1853      // extends its live range BUT cannot be part of the cycle.  If this
1854      // extended live range overlaps with the update of the loop-phi value
1855      // we need both alive at the same time -- which requires at least 1
1856      // copy.  But because Intel has only 2-address registers we end up with
1857      // at least 2 copies, one before the loop-phi update instruction and
1858      // one after.  Instead we split the input to the compare just after the
1859      // phi.
1860      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1861        Node *phi = n->in(1);
1862        if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1863          Block *phi_block = _cfg.get_block_for_node(phi);
1864          if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
1865            const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1866            Node *spill = new MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
1867            insert_proj( phi_block, 1, spill, maxlrg++ );
1868            n->set_req(1,spill);
1869            must_recompute_live = true;
1870          }
1871        }
1872      }
1873
1874      // Get value being defined
1875      uint lidx = _lrg_map.live_range_id(n);
1876      // Ignore the occasional brand-new live range
1877      if (lidx && lidx < _lrg_map.max_lrg_id()) {
1878        // Remove from live-out set
1879        liveout.remove(lidx);
1880
1881        // Copies do not define a new value and so do not interfere.
1882        // Remove the copies source from the liveout set before interfering.
1883        uint idx = n->is_Copy();
1884        if (idx) {
1885          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1886        }
1887      }
1888
1889      // Found a safepoint?
1890      JVMState *jvms = n->jvms();
1891      if( jvms ) {
1892        // Now scan for a live derived pointer
1893        IndexSetIterator elements(&liveout);
1894        uint neighbor;
1895        while ((neighbor = elements.next()) != 0) {
1896          // Find reaching DEF for base and derived values
1897          // This works because we are still in SSA during this call.
1898          Node *derived = lrgs(neighbor)._def;
1899          const TypePtr *tj = derived->bottom_type()->isa_ptr();
1900          assert(!derived->bottom_type()->isa_narrowoop() ||
1901                  derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1902          // If its an OOP with a non-zero offset, then it is derived.
1903          if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1904            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1905            assert(base->_idx < _lrg_map.size(), "");
1906            // Add reaching DEFs of derived pointer and base pointer as a
1907            // pair of inputs
1908            n->add_req(derived);
1909            n->add_req(base);
1910
1911            // See if the base pointer is already live to this point.
1912            // Since I'm working on the SSA form, live-ness amounts to
1913            // reaching def's.  So if I find the base's live range then
1914            // I know the base's def reaches here.
1915            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1916                 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1917                 (_lrg_map.live_range_id(base) > 0) && // not a constant
1918                 _cfg.get_block_for_node(base) != block) { // base not def'd in blk)
1919              // Base pointer is not currently live.  Since I stretched
1920              // the base pointer to here and it crosses basic-block
1921              // boundaries, the global live info is now incorrect.
1922              // Recompute live.
1923              must_recompute_live = true;
1924            } // End of if base pointer is not live to debug info
1925          }
1926        } // End of scan all live data for derived ptrs crossing GC point
1927      } // End of if found a GC point
1928
1929      // Make all inputs live
1930      if (!n->is_Phi()) {      // Phi function uses come from prior block
1931        for (uint k = 1; k < n->req(); k++) {
1932          uint lidx = _lrg_map.live_range_id(n->in(k));
1933          if (lidx < _lrg_map.max_lrg_id()) {
1934            liveout.insert(lidx);
1935          }
1936        }
1937      }
1938
1939    } // End of forall instructions in block
1940    liveout.clear();  // Free the memory used by liveout.
1941
1942  } // End of forall blocks
1943  _lrg_map.set_max_lrg_id(maxlrg);
1944
1945  // If I created a new live range I need to recompute live
1946  if (maxlrg != _ifg->_maxlrg) {
1947    must_recompute_live = true;
1948  }
1949
1950  return must_recompute_live != 0;
1951}
1952
1953// Extend the node to LRG mapping
1954
1955void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
1956  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
1957}
1958
1959#ifndef PRODUCT
1960void PhaseChaitin::dump(const Node *n) const {
1961  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
1962  tty->print("L%d",r);
1963  if (r && n->Opcode() != Op_Phi) {
1964    if( _node_regs ) {          // Got a post-allocation copy of allocation?
1965      tty->print("[");
1966      OptoReg::Name second = get_reg_second(n);
1967      if( OptoReg::is_valid(second) ) {
1968        if( OptoReg::is_reg(second) )
1969          tty->print("%s:",Matcher::regName[second]);
1970        else
1971          tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1972      }
1973      OptoReg::Name first = get_reg_first(n);
1974      if( OptoReg::is_reg(first) )
1975        tty->print("%s]",Matcher::regName[first]);
1976      else
1977         tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
1978    } else
1979    n->out_RegMask().dump();
1980  }
1981  tty->print("/N%d\t",n->_idx);
1982  tty->print("%s === ", n->Name());
1983  uint k;
1984  for (k = 0; k < n->req(); k++) {
1985    Node *m = n->in(k);
1986    if (!m) {
1987      tty->print("_ ");
1988    }
1989    else {
1990      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1991      tty->print("L%d",r);
1992      // Data MultiNode's can have projections with no real registers.
1993      // Don't die while dumping them.
1994      int op = n->Opcode();
1995      if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1996        if( _node_regs ) {
1997          tty->print("[");
1998          OptoReg::Name second = get_reg_second(n->in(k));
1999          if( OptoReg::is_valid(second) ) {
2000            if( OptoReg::is_reg(second) )
2001              tty->print("%s:",Matcher::regName[second]);
2002            else
2003              tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
2004                         reg2offset_unchecked(second));
2005          }
2006          OptoReg::Name first = get_reg_first(n->in(k));
2007          if( OptoReg::is_reg(first) )
2008            tty->print("%s]",Matcher::regName[first]);
2009          else
2010            tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
2011                       reg2offset_unchecked(first));
2012        } else
2013          n->in_RegMask(k).dump();
2014      }
2015      tty->print("/N%d ",m->_idx);
2016    }
2017  }
2018  if( k < n->len() && n->in(k) ) tty->print("| ");
2019  for( ; k < n->len(); k++ ) {
2020    Node *m = n->in(k);
2021    if(!m) {
2022      break;
2023    }
2024    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
2025    tty->print("L%d",r);
2026    tty->print("/N%d ",m->_idx);
2027  }
2028  if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
2029  else n->dump_spec(tty);
2030  if( _spilled_once.test(n->_idx ) ) {
2031    tty->print(" Spill_1");
2032    if( _spilled_twice.test(n->_idx ) )
2033      tty->print(" Spill_2");
2034  }
2035  tty->print("\n");
2036}
2037
2038void PhaseChaitin::dump(const Block *b) const {
2039  b->dump_head(&_cfg);
2040
2041  // For all instructions
2042  for( uint j = 0; j < b->number_of_nodes(); j++ )
2043    dump(b->get_node(j));
2044  // Print live-out info at end of block
2045  if( _live ) {
2046    tty->print("Liveout: ");
2047    IndexSet *live = _live->live(b);
2048    IndexSetIterator elements(live);
2049    tty->print("{");
2050    uint i;
2051    while ((i = elements.next()) != 0) {
2052      tty->print("L%d ", _lrg_map.find_const(i));
2053    }
2054    tty->print_cr("}");
2055  }
2056  tty->print("\n");
2057}
2058
2059void PhaseChaitin::dump() const {
2060  tty->print( "--- Chaitin -- argsize: %d  framesize: %d ---\n",
2061              _matcher._new_SP, _framesize );
2062
2063  // For all blocks
2064  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2065    dump(_cfg.get_block(i));
2066  }
2067  // End of per-block dump
2068  tty->print("\n");
2069
2070  if (!_ifg) {
2071    tty->print("(No IFG.)\n");
2072    return;
2073  }
2074
2075  // Dump LRG array
2076  tty->print("--- Live RanGe Array ---\n");
2077  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
2078    tty->print("L%d: ",i2);
2079    if (i2 < _ifg->_maxlrg) {
2080      lrgs(i2).dump();
2081    }
2082    else {
2083      tty->print_cr("new LRG");
2084    }
2085  }
2086  tty->cr();
2087
2088  // Dump lo-degree list
2089  tty->print("Lo degree: ");
2090  for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
2091    tty->print("L%d ",i3);
2092  tty->cr();
2093
2094  // Dump lo-stk-degree list
2095  tty->print("Lo stk degree: ");
2096  for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
2097    tty->print("L%d ",i4);
2098  tty->cr();
2099
2100  // Dump lo-degree list
2101  tty->print("Hi degree: ");
2102  for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
2103    tty->print("L%d ",i5);
2104  tty->cr();
2105}
2106
2107void PhaseChaitin::dump_degree_lists() const {
2108  // Dump lo-degree list
2109  tty->print("Lo degree: ");
2110  for( uint i = _lo_degree; i; i = lrgs(i)._next )
2111    tty->print("L%d ",i);
2112  tty->cr();
2113
2114  // Dump lo-stk-degree list
2115  tty->print("Lo stk degree: ");
2116  for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
2117    tty->print("L%d ",i2);
2118  tty->cr();
2119
2120  // Dump lo-degree list
2121  tty->print("Hi degree: ");
2122  for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
2123    tty->print("L%d ",i3);
2124  tty->cr();
2125}
2126
2127void PhaseChaitin::dump_simplified() const {
2128  tty->print("Simplified: ");
2129  for( uint i = _simplified; i; i = lrgs(i)._next )
2130    tty->print("L%d ",i);
2131  tty->cr();
2132}
2133
2134static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
2135  if ((int)reg < 0)
2136    sprintf(buf, "<OptoReg::%d>", (int)reg);
2137  else if (OptoReg::is_reg(reg))
2138    strcpy(buf, Matcher::regName[reg]);
2139  else
2140    sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
2141            pc->reg2offset(reg));
2142  return buf+strlen(buf);
2143}
2144
2145// Dump a register name into a buffer.  Be intelligent if we get called
2146// before allocation is complete.
2147char *PhaseChaitin::dump_register( const Node *n, char *buf  ) const {
2148  if( this == NULL ) {          // Not got anything?
2149    sprintf(buf,"N%d",n->_idx); // Then use Node index
2150  } else if( _node_regs ) {
2151    // Post allocation, use direct mappings, no LRG info available
2152    print_reg( get_reg_first(n), this, buf );
2153  } else {
2154    uint lidx = _lrg_map.find_const(n); // Grab LRG number
2155    if( !_ifg ) {
2156      sprintf(buf,"L%d",lidx);  // No register binding yet
2157    } else if( !lidx ) {        // Special, not allocated value
2158      strcpy(buf,"Special");
2159    } else {
2160      if (lrgs(lidx)._is_vector) {
2161        if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
2162          print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
2163        else
2164          sprintf(buf,"L%d",lidx); // No register binding yet
2165      } else if( (lrgs(lidx).num_regs() == 1)
2166                 ? lrgs(lidx).mask().is_bound1()
2167                 : lrgs(lidx).mask().is_bound_pair() ) {
2168        // Hah!  We have a bound machine register
2169        print_reg( lrgs(lidx).reg(), this, buf );
2170      } else {
2171        sprintf(buf,"L%d",lidx); // No register binding yet
2172      }
2173    }
2174  }
2175  return buf+strlen(buf);
2176}
2177
2178void PhaseChaitin::dump_for_spill_split_recycle() const {
2179  if( WizardMode && (PrintCompilation || PrintOpto) ) {
2180    // Display which live ranges need to be split and the allocator's state
2181    tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2182    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2183      if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2184        tty->print("L%d: ", bidx);
2185        lrgs(bidx).dump();
2186      }
2187    }
2188    tty->cr();
2189    dump();
2190  }
2191}
2192
2193void PhaseChaitin::dump_frame() const {
2194  const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2195  const TypeTuple *domain = C->tf()->domain();
2196  const int        argcnt = domain->cnt() - TypeFunc::Parms;
2197
2198  // Incoming arguments in registers dump
2199  for( int k = 0; k < argcnt; k++ ) {
2200    OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2201    if( OptoReg::is_reg(parmreg))  {
2202      const char *reg_name = OptoReg::regname(parmreg);
2203      tty->print("#r%3.3d %s", parmreg, reg_name);
2204      parmreg = _matcher._parm_regs[k].second();
2205      if( OptoReg::is_reg(parmreg))  {
2206        tty->print(":%s", OptoReg::regname(parmreg));
2207      }
2208      tty->print("   : parm %d: ", k);
2209      domain->field_at(k + TypeFunc::Parms)->dump();
2210      tty->cr();
2211    }
2212  }
2213
2214  // Check for un-owned padding above incoming args
2215  OptoReg::Name reg = _matcher._new_SP;
2216  if( reg > _matcher._in_arg_limit ) {
2217    reg = OptoReg::add(reg, -1);
2218    tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2219  }
2220
2221  // Incoming argument area dump
2222  OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2223  while( reg > begin_in_arg ) {
2224    reg = OptoReg::add(reg, -1);
2225    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2226    int j;
2227    for( j = 0; j < argcnt; j++) {
2228      if( _matcher._parm_regs[j].first() == reg ||
2229          _matcher._parm_regs[j].second() == reg ) {
2230        tty->print("parm %d: ",j);
2231        domain->field_at(j + TypeFunc::Parms)->dump();
2232        tty->cr();
2233        break;
2234      }
2235    }
2236    if( j >= argcnt )
2237      tty->print_cr("HOLE, owned by SELF");
2238  }
2239
2240  // Old outgoing preserve area
2241  while( reg > _matcher._old_SP ) {
2242    reg = OptoReg::add(reg, -1);
2243    tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2244  }
2245
2246  // Old SP
2247  tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2248    reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2249
2250  // Preserve area dump
2251  int fixed_slots = C->fixed_slots();
2252  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2253  OptoReg::Name return_addr = _matcher.return_addr();
2254
2255  reg = OptoReg::add(reg, -1);
2256  while (OptoReg::is_stack(reg)) {
2257    tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2258    if (return_addr == reg) {
2259      tty->print_cr("return address");
2260    } else if (reg >= begin_in_preserve) {
2261      // Preserved slots are present on x86
2262      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2263        tty->print_cr("saved fp register");
2264      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2265               VerifyStackAtCalls)
2266        tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
2267      else
2268        tty->print_cr("in_preserve");
2269    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2270      tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2271    } else {
2272      tty->print_cr("pad2, stack alignment");
2273    }
2274    reg = OptoReg::add(reg, -1);
2275  }
2276
2277  // Spill area dump
2278  reg = OptoReg::add(_matcher._new_SP, _framesize );
2279  while( reg > _matcher._out_arg_limit ) {
2280    reg = OptoReg::add(reg, -1);
2281    tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2282  }
2283
2284  // Outgoing argument area dump
2285  while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2286    reg = OptoReg::add(reg, -1);
2287    tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2288  }
2289
2290  // Outgoing new preserve area
2291  while( reg > _matcher._new_SP ) {
2292    reg = OptoReg::add(reg, -1);
2293    tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2294  }
2295  tty->print_cr("#");
2296}
2297
2298void PhaseChaitin::dump_bb( uint pre_order ) const {
2299  tty->print_cr("---dump of B%d---",pre_order);
2300  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2301    Block* block = _cfg.get_block(i);
2302    if (block->_pre_order == pre_order) {
2303      dump(block);
2304    }
2305  }
2306}
2307
2308void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2309  tty->print_cr("---dump of L%d---",lidx);
2310
2311  if (_ifg) {
2312    if (lidx >= _lrg_map.max_lrg_id()) {
2313      tty->print("Attempt to print live range index beyond max live range.\n");
2314      return;
2315    }
2316    tty->print("L%d: ",lidx);
2317    if (lidx < _ifg->_maxlrg) {
2318      lrgs(lidx).dump();
2319    } else {
2320      tty->print_cr("new LRG");
2321    }
2322  }
2323  if( _ifg && lidx < _ifg->_maxlrg) {
2324    tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2325    _ifg->neighbors(lidx)->dump();
2326    tty->cr();
2327  }
2328  // For all blocks
2329  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
2330    Block* block = _cfg.get_block(i);
2331    int dump_once = 0;
2332
2333    // For all instructions
2334    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
2335      Node *n = block->get_node(j);
2336      if (_lrg_map.find_const(n) == lidx) {
2337        if (!dump_once++) {
2338          tty->cr();
2339          block->dump_head(&_cfg);
2340        }
2341        dump(n);
2342        continue;
2343      }
2344      if (!defs_only) {
2345        uint cnt = n->req();
2346        for( uint k = 1; k < cnt; k++ ) {
2347          Node *m = n->in(k);
2348          if (!m)  {
2349            continue;  // be robust in the dumper
2350          }
2351          if (_lrg_map.find_const(m) == lidx) {
2352            if (!dump_once++) {
2353              tty->cr();
2354              block->dump_head(&_cfg);
2355            }
2356            dump(n);
2357          }
2358        }
2359      }
2360    }
2361  } // End of per-block dump
2362  tty->cr();
2363}
2364#endif // not PRODUCT
2365
2366int PhaseChaitin::_final_loads  = 0;
2367int PhaseChaitin::_final_stores = 0;
2368int PhaseChaitin::_final_memoves= 0;
2369int PhaseChaitin::_final_copies = 0;
2370double PhaseChaitin::_final_load_cost  = 0;
2371double PhaseChaitin::_final_store_cost = 0;
2372double PhaseChaitin::_final_memove_cost= 0;
2373double PhaseChaitin::_final_copy_cost  = 0;
2374int PhaseChaitin::_conserv_coalesce = 0;
2375int PhaseChaitin::_conserv_coalesce_pair = 0;
2376int PhaseChaitin::_conserv_coalesce_trie = 0;
2377int PhaseChaitin::_conserv_coalesce_quad = 0;
2378int PhaseChaitin::_post_alloc = 0;
2379int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2380int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2381int PhaseChaitin::_used_cisc_instructions   = 0;
2382int PhaseChaitin::_unused_cisc_instructions = 0;
2383int PhaseChaitin::_allocator_attempts       = 0;
2384int PhaseChaitin::_allocator_successes      = 0;
2385
2386#ifndef PRODUCT
2387uint PhaseChaitin::_high_pressure           = 0;
2388uint PhaseChaitin::_low_pressure            = 0;
2389
2390void PhaseChaitin::print_chaitin_statistics() {
2391  tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2392  tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
2393  tty->print_cr("Adjusted spill cost = %7.0f.",
2394                _final_load_cost*4.0 + _final_store_cost  * 2.0 +
2395                _final_copy_cost*1.0 + _final_memove_cost*12.0);
2396  tty->print("Conservatively coalesced %d copies, %d pairs",
2397                _conserv_coalesce, _conserv_coalesce_pair);
2398  if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2399    tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2400  tty->print_cr(", %d post alloc.", _post_alloc);
2401  if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2402    tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2403                  _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2404  if( _used_cisc_instructions || _unused_cisc_instructions )
2405    tty->print_cr("Used cisc instruction  %d,  remained in register %d",
2406                   _used_cisc_instructions, _unused_cisc_instructions);
2407  if( _allocator_successes != 0 )
2408    tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2409  tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2410}
2411#endif // not PRODUCT
2412