phaseX.cpp revision 5776:de6a9e811145
1/*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "memory/allocation.inline.hpp"
27#include "opto/block.hpp"
28#include "opto/callnode.hpp"
29#include "opto/cfgnode.hpp"
30#include "opto/connode.hpp"
31#include "opto/idealGraphPrinter.hpp"
32#include "opto/loopnode.hpp"
33#include "opto/machnode.hpp"
34#include "opto/opcodes.hpp"
35#include "opto/phaseX.hpp"
36#include "opto/regalloc.hpp"
37#include "opto/rootnode.hpp"
38
39//=============================================================================
40#define NODE_HASH_MINIMUM_SIZE    255
41//------------------------------NodeHash---------------------------------------
42NodeHash::NodeHash(uint est_max_size) :
43  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
44  _a(Thread::current()->resource_area()),
45  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ), // (Node**)_a->Amalloc(_max * sizeof(Node*)) ),
46  _inserts(0), _insert_limit( insert_limit() ),
47  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
48  _total_insert_probes(0), _total_inserts(0),
49  _insert_probes(0), _grows(0) {
50  // _sentinel must be in the current node space
51  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
52  memset(_table,0,sizeof(Node*)*_max);
53}
54
55//------------------------------NodeHash---------------------------------------
56NodeHash::NodeHash(Arena *arena, uint est_max_size) :
57  _max( round_up(est_max_size < NODE_HASH_MINIMUM_SIZE ? NODE_HASH_MINIMUM_SIZE : est_max_size) ),
58  _a(arena),
59  _table( NEW_ARENA_ARRAY( _a , Node* , _max ) ),
60  _inserts(0), _insert_limit( insert_limit() ),
61  _look_probes(0), _lookup_hits(0), _lookup_misses(0),
62  _delete_probes(0), _delete_hits(0), _delete_misses(0),
63  _total_insert_probes(0), _total_inserts(0),
64  _insert_probes(0), _grows(0) {
65  // _sentinel must be in the current node space
66  _sentinel = new (Compile::current()) ProjNode(NULL, TypeFunc::Control);
67  memset(_table,0,sizeof(Node*)*_max);
68}
69
70//------------------------------NodeHash---------------------------------------
71NodeHash::NodeHash(NodeHash *nh) {
72  debug_only(_table = (Node**)badAddress);   // interact correctly w/ operator=
73  // just copy in all the fields
74  *this = *nh;
75  // nh->_sentinel must be in the current node space
76}
77
78void NodeHash::replace_with(NodeHash *nh) {
79  debug_only(_table = (Node**)badAddress);   // interact correctly w/ operator=
80  // just copy in all the fields
81  *this = *nh;
82  // nh->_sentinel must be in the current node space
83}
84
85//------------------------------hash_find--------------------------------------
86// Find in hash table
87Node *NodeHash::hash_find( const Node *n ) {
88  // ((Node*)n)->set_hash( n->hash() );
89  uint hash = n->hash();
90  if (hash == Node::NO_HASH) {
91    debug_only( _lookup_misses++ );
92    return NULL;
93  }
94  uint key = hash & (_max-1);
95  uint stride = key | 0x01;
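  // Note (editor): _max is always a power of two (see round_up), so
  // "hash & (_max-1)" picks a bucket and any odd stride is relatively prime
  // to the table size, which makes the probe sequence visit every slot before
  // repeating.  Illustrative example: with _max == 16 and hash == 0x2A,
  //   key = 0x2A & 15 = 10, stride = 10 | 1 = 11,
  //   probes: 10, 5, 0, 11, 6, 1, ... covering all 16 slots.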
96  debug_only( _look_probes++ );
97  Node *k = _table[key];        // Get hashed value
98  if( !k ) {                    // ?Miss?
99    debug_only( _lookup_misses++ );
100    return NULL;                // Miss!
101  }
102
103  int op = n->Opcode();
104  uint req = n->req();
105  while( 1 ) {                  // While probing hash table
106    if( k->req() == req &&      // Same count of inputs
107        k->Opcode() == op ) {   // Same Opcode
108      for( uint i=0; i<req; i++ )
109        if( n->in(i)!=k->in(i)) // Different inputs?
110          goto collision;       // "goto" is a speed hack...
111      if( n->cmp(*k) ) {        // Check for any special bits
112        debug_only( _lookup_hits++ );
113        return k;               // Hit!
114      }
115    }
116  collision:
117    debug_only( _look_probes++ );
118    key = (key + stride/*7*/) & (_max-1); // Stride through table with relative prime
119    k = _table[key];            // Get hashed value
120    if( !k ) {                  // ?Miss?
121      debug_only( _lookup_misses++ );
122      return NULL;              // Miss!
123    }
124  }
125  ShouldNotReachHere();
126  return NULL;
127}
128
129//------------------------------hash_find_insert-------------------------------
130// Find in hash table, insert if not already present
131// Used to preserve unique entries in hash table
132Node *NodeHash::hash_find_insert( Node *n ) {
133  // n->set_hash( );
134  uint hash = n->hash();
135  if (hash == Node::NO_HASH) {
136    debug_only( _lookup_misses++ );
137    return NULL;
138  }
139  uint key = hash & (_max-1);
140  uint stride = key | 0x01;     // stride must be relatively prime to table size
141  uint first_sentinel = 0;      // replace a sentinel if seen.
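  // Deleted entries are not NULLed out but overwritten with _sentinel (see
  // hash_delete), so a probe must keep walking past them; first_sentinel
  // remembers the first such reusable slot in case the key turns out to be new.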
142  debug_only( _look_probes++ );
143  Node *k = _table[key];        // Get hashed value
144  if( !k ) {                    // ?Miss?
145    debug_only( _lookup_misses++ );
146    _table[key] = n;            // Insert into table!
147    debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
148    check_grow();               // Grow table if insert hit limit
149    return NULL;                // Miss!
150  }
151  else if( k == _sentinel ) {
152    first_sentinel = key;      // Can insert here
153  }
154
155  int op = n->Opcode();
156  uint req = n->req();
157  while( 1 ) {                  // While probing hash table
158    if( k->req() == req &&      // Same count of inputs
159        k->Opcode() == op ) {   // Same Opcode
160      for( uint i=0; i<req; i++ )
161        if( n->in(i)!=k->in(i)) // Different inputs?
162          goto collision;       // "goto" is a speed hack...
163      if( n->cmp(*k) ) {        // Check for any special bits
164        debug_only( _lookup_hits++ );
165        return k;               // Hit!
166      }
167    }
168  collision:
169    debug_only( _look_probes++ );
170    key = (key + stride) & (_max-1); // Stride through table w/ relative prime
171    k = _table[key];            // Get hashed value
172    if( !k ) {                  // ?Miss?
173      debug_only( _lookup_misses++ );
174      key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
175      _table[key] = n;          // Insert into table!
176      debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
177      check_grow();             // Grow table if insert hit limit
178      return NULL;              // Miss!
179    }
180    else if( first_sentinel == 0 && k == _sentinel ) {
181      first_sentinel = key;    // Can insert here
182    }
183
184  }
185  ShouldNotReachHere();
186  return NULL;
187}
188
189//------------------------------hash_insert------------------------------------
190// Insert into hash table
191void NodeHash::hash_insert( Node *n ) {
192  // // "conflict" comments -- print nodes that conflict
193  // bool conflict = false;
194  // n->set_hash();
195  uint hash = n->hash();
196  if (hash == Node::NO_HASH) {
197    return;
198  }
199  check_grow();
200  uint key = hash & (_max-1);
201  uint stride = key | 0x01;
202
203  while( 1 ) {                  // While probing hash table
204    debug_only( _insert_probes++ );
205    Node *k = _table[key];      // Get hashed value
206    if( !k || (k == _sentinel) ) break;       // Found a slot
207    assert( k != n, "already inserted" );
208    // if( PrintCompilation && PrintOptoStatistics && Verbose ) { tty->print("  conflict: "); k->dump(); conflict = true; }
209    key = (key + stride) & (_max-1); // Stride through table w/ relative prime
210  }
211  _table[key] = n;              // Insert into table!
212  debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
213  // if( conflict ) { n->dump(); }
214}
215
216//------------------------------hash_delete------------------------------------
217// Replace in hash table with sentinel
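// The slot is overwritten with _sentinel rather than NULL: a NULL would cut
// short the probe sequences of entries inserted after this one and make them
// unreachable.  The sentinel keeps those chains intact at the cost of a few
// extra lookup probes (see the note in NodeHash::dump).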
218bool NodeHash::hash_delete( const Node *n ) {
219  Node *k;
220  uint hash = n->hash();
221  if (hash == Node::NO_HASH) {
222    debug_only( _delete_misses++ );
223    return false;
224  }
225  uint key = hash & (_max-1);
226  uint stride = key | 0x01;
227  debug_only( uint counter = 0; );
228  for( ; /* (k != NULL) && (k != _sentinel) */; ) {
229    debug_only( counter++ );
230    debug_only( _delete_probes++ );
231    k = _table[key];            // Get hashed value
232    if( !k ) {                  // Miss?
233      debug_only( _delete_misses++ );
234#ifdef ASSERT
235      if( VerifyOpto ) {
236        for( uint i=0; i < _max; i++ )
237          assert( _table[i] != n, "changed edges with rehashing" );
238      }
239#endif
240      return false;             // Miss! Not in chain
241    }
242    else if( n == k ) {
243      debug_only( _delete_hits++ );
244      _table[key] = _sentinel;  // Hit! Label as deleted entry
245      debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
246      return true;
247    }
248    else {
249      // collision: move through table with prime offset
250      key = (key + stride/*7*/) & (_max-1);
251      assert( counter <= _insert_limit, "Cycle in hash-table");
252    }
253  }
254  ShouldNotReachHere();
255  return false;
256}
257
258//------------------------------round_up---------------------------------------
259// Round up to nearest power of 2
260uint NodeHash::round_up( uint x ) {
261  x += (x>>2);                  // Add 25% slop
262  if( x <16 ) return 16;        // Small stuff
263  uint i=16;
264  while( i < x ) i <<= 1;       // Double to fit
265  return i;                     // Return hash table size
266}
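// Illustrative example: round_up(255) pads by 25% to 318 and then doubles
// 16 -> 32 -> 64 -> 128 -> 256 -> 512, so the default minimum table
// (NODE_HASH_MINIMUM_SIZE == 255) actually allocates 512 slots.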
267
268//------------------------------grow-------------------------------------------
269// Grow _table to next power of 2 and insert old entries
270void  NodeHash::grow() {
271  // Record old state
272  uint   old_max   = _max;
273  Node **old_table = _table;
274  // Construct new table with twice the space
275  _grows++;
276  _total_inserts       += _inserts;
277  _total_insert_probes += _insert_probes;
278  _inserts         = 0;
279  _insert_probes   = 0;
280  _max     = _max << 1;
281  _table   = NEW_ARENA_ARRAY( _a , Node* , _max ); // (Node**)_a->Amalloc( _max * sizeof(Node*) );
282  memset(_table,0,sizeof(Node*)*_max);
283  _insert_limit = insert_limit();
284  // Insert old entries into the new table
285  for( uint i = 0; i < old_max; i++ ) {
286    Node *m = *old_table++;
287    if( !m || m == _sentinel ) continue;
288    debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table.
289    hash_insert(m);
290  }
291}
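// Note that the rehash loop above skips _sentinel entries, so growing the
// table also discards any tombstones that accumulated from hash_delete.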
292
293//------------------------------clear------------------------------------------
294// Clear all entries in _table to NULL but keep storage
295void  NodeHash::clear() {
296#ifdef ASSERT
297  // Unlock all nodes upon removal from table.
298  for (uint i = 0; i < _max; i++) {
299    Node* n = _table[i];
300    if (!n || n == _sentinel)  continue;
301    n->exit_hash_lock();
302  }
303#endif
304
305  memset( _table, 0, _max * sizeof(Node*) );
306}
307
308//-----------------------remove_useless_nodes----------------------------------
309// Remove useless nodes from value table,
310// implementation does not depend on hash function
311void NodeHash::remove_useless_nodes(VectorSet &useful) {
312
313  // Dead nodes in the hash table inherited from GVN should not replace
314  // existing nodes, remove dead nodes.
315  uint max = size();
316  Node *sentinel_node = sentinel();
317  for( uint i = 0; i < max; ++i ) {
318    Node *n = at(i);
319    if(n != NULL && n != sentinel_node && !useful.test(n->_idx)) {
320      debug_only(n->exit_hash_lock()); // Unlock the node when removed
321      _table[i] = sentinel_node;       // Replace with placeholder
322    }
323  }
324}
325
326#ifndef PRODUCT
327//------------------------------dump-------------------------------------------
328// Dump statistics for the hash table
329void NodeHash::dump() {
330  _total_inserts       += _inserts;
331  _total_insert_probes += _insert_probes;
332  if (PrintCompilation && PrintOptoStatistics && Verbose && (_inserts > 0)) {
333    if (WizardMode) {
334      for (uint i=0; i<_max; i++) {
335        if (_table[i])
336          tty->print("%d/%d/%d ",i,_table[i]->hash()&(_max-1),_table[i]->_idx);
337      }
338    }
339    tty->print("\nGVN Hash stats:  %d grows to %d max_size\n", _grows, _max);
340    tty->print("  %d/%d (%8.1f%% full)\n", _inserts, _max, (double)_inserts/_max*100.0);
341    tty->print("  %dp/(%dh+%dm) (%8.2f probes/lookup)\n", _look_probes, _lookup_hits, _lookup_misses, (double)_look_probes/(_lookup_hits+_lookup_misses));
342    tty->print("  %dp/%di (%8.2f probes/insert)\n", _total_insert_probes, _total_inserts, (double)_total_insert_probes/_total_inserts);
343    // sentinels increase lookup cost, but not insert cost
344    assert((_lookup_misses+_lookup_hits)*4+100 >= _look_probes, "bad hash function");
345    assert( _inserts+(_inserts>>3) < _max, "table too full" );
346    assert( _inserts*3+100 >= _insert_probes, "bad hash function" );
347  }
348}
349
350Node *NodeHash::find_index(uint idx) { // For debugging
351  // Find an entry by its index value
352  for( uint i = 0; i < _max; i++ ) {
353    Node *m = _table[i];
354    if( !m || m == _sentinel ) continue;
355    if( m->_idx == (uint)idx ) return m;
356  }
357  return NULL;
358}
359#endif
360
361#ifdef ASSERT
362NodeHash::~NodeHash() {
363  // Unlock all nodes upon destruction of table.
364  if (_table != (Node**)badAddress)  clear();
365}
366
367void NodeHash::operator=(const NodeHash& nh) {
368  // Unlock all nodes upon replacement of table.
369  if (&nh == this)  return;
370  if (_table != (Node**)badAddress)  clear();
371  memcpy(this, &nh, sizeof(*this));
372  // Do not increment hash_lock counts again.
373  // Instead, be sure we never again use the source table.
374  ((NodeHash*)&nh)->_table = (Node**)badAddress;
375}
376
377
378#endif
379
380
381//=============================================================================
382//------------------------------PhaseRemoveUseless-----------------------------
383// 1) Use a breadth-first walk to collect the useful nodes reachable from root.
384PhaseRemoveUseless::PhaseRemoveUseless( PhaseGVN *gvn, Unique_Node_List *worklist ) : Phase(Remove_Useless),
385  _useful(Thread::current()->resource_area()) {
386
387  // Implementation requires 'UseLoopSafepoints == true' and an edge from root
388  // to each SafePointNode at a backward branch.  Inserted in add_safepoint().
389  if( !UseLoopSafepoints || !OptoRemoveUseless ) return;
390
391  // Identify nodes that are reachable from below, useful.
392  C->identify_useful_nodes(_useful);
393  // Update dead node list
394  C->update_dead_node_list(_useful);
395
396  // Remove all useless nodes from PhaseValues' recorded types
397  // Must be done before disconnecting nodes to preserve hash-table-invariant
398  gvn->remove_useless_nodes(_useful.member_set());
399
400  // Remove all useless nodes from future worklist
401  worklist->remove_useless_nodes(_useful.member_set());
402
403  // Disconnect 'useless' nodes that are adjacent to useful nodes
404  C->remove_useless_nodes(_useful);
405
406  // Remove edges from "root" to each SafePoint at a backward branch.
407  // They were inserted during parsing (see add_safepoint()) to make infinite
408  // loops without calls or exceptions visible to root, i.e., useful.
409  Node *root = C->root();
410  if( root != NULL ) {
411    for( uint i = root->req(); i < root->len(); ++i ) {
412      Node *n = root->in(i);
413      if( n != NULL && n->is_SafePoint() ) {
414        root->rm_prec(i);
415        --i;
416      }
417    }
418  }
419}
420
421
422//=============================================================================
423//------------------------------PhaseTransform---------------------------------
424PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum),
425  _arena(Thread::current()->resource_area()),
426  _nodes(_arena),
427  _types(_arena)
428{
429  init_con_caches();
430#ifndef PRODUCT
431  clear_progress();
432  clear_transforms();
433  set_allow_progress(true);
434#endif
435  // Force allocation for currently existing nodes
436  _types.map(C->unique(), NULL);
437}
438
439//------------------------------PhaseTransform---------------------------------
440PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum),
441  _arena(arena),
442  _nodes(arena),
443  _types(arena)
444{
445  init_con_caches();
446#ifndef PRODUCT
447  clear_progress();
448  clear_transforms();
449  set_allow_progress(true);
450#endif
451  // Force allocation for currently existing nodes
452  _types.map(C->unique(), NULL);
453}
454
455//------------------------------PhaseTransform---------------------------------
456// Initialize with previously generated type information
457PhaseTransform::PhaseTransform( PhaseTransform *pt, PhaseNumber pnum ) : Phase(pnum),
458  _arena(pt->_arena),
459  _nodes(pt->_nodes),
460  _types(pt->_types)
461{
462  init_con_caches();
463#ifndef PRODUCT
464  clear_progress();
465  clear_transforms();
466  set_allow_progress(true);
467#endif
468}
469
470void PhaseTransform::init_con_caches() {
471  memset(_icons,0,sizeof(_icons));
472  memset(_lcons,0,sizeof(_lcons));
473  memset(_zcons,0,sizeof(_zcons));
474}
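// The _icons/_lcons/_zcons arrays cache ConNodes for small int and long
// constants and for the zero/null constant of each BasicType, so repeated
// requests (see intcon/longcon/zerocon below) can skip the hash-table lookup
// in uncached_makecon.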
475
476
477//--------------------------------find_int_type--------------------------------
478const TypeInt* PhaseTransform::find_int_type(Node* n) {
479  if (n == NULL)  return NULL;
480  // Call type_or_null(n) to determine the node's type, since we might be in
481  // the parse phase, where calling n->Value() may return the wrong type.
482  // (For example, a phi node at the beginning of loop parsing is not ready.)
483  const Type* t = type_or_null(n);
484  if (t == NULL)  return NULL;
485  return t->isa_int();
486}
487
488
489//-------------------------------find_long_type--------------------------------
490const TypeLong* PhaseTransform::find_long_type(Node* n) {
491  if (n == NULL)  return NULL;
492  // (See comment above on type_or_null.)
493  const Type* t = type_or_null(n);
494  if (t == NULL)  return NULL;
495  return t->isa_long();
496}
497
498
499#ifndef PRODUCT
500void PhaseTransform::dump_old2new_map() const {
501  _nodes.dump();
502}
503
504void PhaseTransform::dump_new( uint nidx ) const {
505  for( uint i=0; i<_nodes.Size(); i++ )
506    if( _nodes[i] && _nodes[i]->_idx == nidx ) {
507      _nodes[i]->dump();
508      tty->cr();
509      tty->print_cr("Old index= %d",i);
510      return;
511    }
512  tty->print_cr("Node %d not found in the new indices", nidx);
513}
514
515//------------------------------dump_types-------------------------------------
516void PhaseTransform::dump_types( ) const {
517  _types.dump();
518}
519
520//------------------------------dump_nodes_and_types---------------------------
521void PhaseTransform::dump_nodes_and_types(const Node *root, uint depth, bool only_ctrl) {
522  VectorSet visited(Thread::current()->resource_area());
523  dump_nodes_and_types_recur( root, depth, only_ctrl, visited );
524}
525
526//------------------------------dump_nodes_and_types_recur---------------------
527void PhaseTransform::dump_nodes_and_types_recur( const Node *n, uint depth, bool only_ctrl, VectorSet &visited) {
528  if( !n ) return;
529  if( depth == 0 ) return;
530  if( visited.test_set(n->_idx) ) return;
531  for( uint i=0; i<n->len(); i++ ) {
532    if( only_ctrl && !(n->is_Region()) && i != TypeFunc::Control ) continue;
533    dump_nodes_and_types_recur( n->in(i), depth-1, only_ctrl, visited );
534  }
535  n->dump();
536  if (type_or_null(n) != NULL) {
537    tty->print("      "); type(n)->dump(); tty->cr();
538  }
539}
540
541#endif
542
543
544//=============================================================================
545//------------------------------PhaseValues------------------------------------
546// Set minimum table size to "255"
547PhaseValues::PhaseValues( Arena *arena, uint est_max_size ) : PhaseTransform(arena, GVN), _table(arena, est_max_size) {
548  NOT_PRODUCT( clear_new_values(); )
549}
550
551//------------------------------PhaseValues------------------------------------
552// Set minimum table size to "255"
553PhaseValues::PhaseValues( PhaseValues *ptv ) : PhaseTransform( ptv, GVN ),
554  _table(&ptv->_table) {
555  NOT_PRODUCT( clear_new_values(); )
556}
557
558//------------------------------PhaseValues------------------------------------
559// Used by +VerifyOpto.  Clear out hash table but copy _types array.
560PhaseValues::PhaseValues( PhaseValues *ptv, const char *dummy ) : PhaseTransform( ptv, GVN ),
561  _table(ptv->arena(),ptv->_table.size()) {
562  NOT_PRODUCT( clear_new_values(); )
563}
564
565//------------------------------~PhaseValues-----------------------------------
566#ifndef PRODUCT
567PhaseValues::~PhaseValues() {
568  _table.dump();
569
570  // Statistics for value progress and efficiency
571  if( PrintCompilation && Verbose && WizardMode ) {
572    tty->print("\n%sValues: %d nodes ---> %d/%d (%d)",
573      is_IterGVN() ? "Iter" : "    ", C->unique(), made_progress(), made_transforms(), made_new_values());
574    if( made_transforms() != 0 ) {
575      tty->print_cr("  ratio %f", made_progress()/(float)made_transforms() );
576    } else {
577      tty->cr();
578    }
579  }
580}
581#endif
582
583//------------------------------makecon----------------------------------------
584ConNode* PhaseTransform::makecon(const Type *t) {
585  assert(t->singleton(), "must be a constant");
586  assert(!t->empty() || t == Type::TOP, "must not be vacuous range");
587  switch (t->base()) {  // fast paths
588  case Type::Half:
589  case Type::Top:  return (ConNode*) C->top();
590  case Type::Int:  return intcon( t->is_int()->get_con() );
591  case Type::Long: return longcon( t->is_long()->get_con() );
592  }
593  if (t->is_zero_type())
594    return zerocon(t->basic_type());
595  return uncached_makecon(t);
596}
597
598//--------------------------uncached_makecon-----------------------------------
599// Make an idealized constant - one of ConINode, ConPNode, etc.
600ConNode* PhaseValues::uncached_makecon(const Type *t) {
601  assert(t->singleton(), "must be a constant");
602  ConNode* x = ConNode::make(C, t);
603  ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering
604  if (k == NULL) {
605    set_type(x, t);             // Missed, provide type mapping
606    GrowableArray<Node_Notes*>* nna = C->node_note_array();
607    if (nna != NULL) {
608      Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true);
609      loc->clear(); // do not put debug info on constants
610    }
611  } else {
612    x->destruct();              // Hit, destroy duplicate constant
613    x = k;                      // use existing constant
614  }
615  return x;
616}
617
618//------------------------------intcon-----------------------------------------
619// Fast integer constant.  Same as "transform(new ConINode(TypeInt::make(i)))"
620ConINode* PhaseTransform::intcon(int i) {
621  // Small integer?  Check cache! Check that cached node is not dead
622  if (i >= _icon_min && i <= _icon_max) {
623    ConINode* icon = _icons[i-_icon_min];
624    if (icon != NULL && icon->in(TypeFunc::Control) != NULL)
625      return icon;
626  }
627  ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i));
628  assert(icon->is_Con(), "");
629  if (i >= _icon_min && i <= _icon_max)
630    _icons[i-_icon_min] = icon;   // Cache small integers
631  return icon;
632}
633
634//------------------------------longcon----------------------------------------
635// Fast long constant.
636ConLNode* PhaseTransform::longcon(jlong l) {
637  // Small integer?  Check cache! Check that cached node is not dead
638  if (l >= _lcon_min && l <= _lcon_max) {
639    ConLNode* lcon = _lcons[l-_lcon_min];
640    if (lcon != NULL && lcon->in(TypeFunc::Control) != NULL)
641      return lcon;
642  }
643  ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l));
644  assert(lcon->is_Con(), "");
645  if (l >= _lcon_min && l <= _lcon_max)
646    _lcons[l-_lcon_min] = lcon;      // Cache small integers
647  return lcon;
648}
649
650//------------------------------zerocon-----------------------------------------
651// Fast zero or null constant. Same as "transform(ConNode::make(Type::get_zero_type(bt)))"
652ConNode* PhaseTransform::zerocon(BasicType bt) {
653  assert((uint)bt <= _zcon_max, "domain check");
654  ConNode* zcon = _zcons[bt];
655  if (zcon != NULL && zcon->in(TypeFunc::Control) != NULL)
656    return zcon;
657  zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt));
658  _zcons[bt] = zcon;
659  return zcon;
660}
661
662
663
664//=============================================================================
665//------------------------------transform--------------------------------------
666// Return a node which computes the same function as this node, but in a
667// faster or cheaper fashion.
668Node *PhaseGVN::transform( Node *n ) {
669  return transform_no_reclaim(n);
670}
671
672//------------------------------transform--------------------------------------
673// Return a node which computes the same function as this node, but
674// in a faster or cheaper fashion.
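// The steps below are, in order: apply Ideal() until it stops improving the
// node, compute and cache its Value() (turning singleton types into constants
// via makecon), try Identity() to find an equivalent existing node, and
// finally value-number the result through the hash table (hash_find_insert).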
675Node *PhaseGVN::transform_no_reclaim( Node *n ) {
676  NOT_PRODUCT( set_transforms(); )
677
678  // Apply the Ideal call in a loop until it no longer applies
679  Node *k = n;
680  NOT_PRODUCT( uint loop_count = 0; )
681  while( 1 ) {
682    Node *i = k->Ideal(this, /*can_reshape=*/false);
683    if( !i ) break;
684    assert( i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
685    k = i;
686    assert(loop_count++ < K, "infinite loop in PhaseGVN::transform");
687  }
688  NOT_PRODUCT( if( loop_count != 0 ) { set_progress(); } )
689
690
691  // If brand new node, make space in type array.
692  ensure_type_or_null(k);
693
694  // Since I just called 'Value' to compute the set of run-time values
695  // for this Node, and 'Value' is non-local (and therefore expensive) I'll
696  // cache Value.  Later requests for the local phase->type of this Node can
697  // use the cached Value instead of suffering with 'bottom_type'.
698  const Type *t = k->Value(this); // Get runtime Value set
699  assert(t != NULL, "value sanity");
700  if (type_or_null(k) != t) {
701#ifndef PRODUCT
702    // Do not count initial visit to node as a transformation
703    if (type_or_null(k) == NULL) {
704      inc_new_values();
705      set_progress();
706    }
707#endif
708    set_type(k, t);
709    // If k is a TypeNode, capture any more-precise type permanently into Node
710    k->raise_bottom_type(t);
711  }
712
713  if( t->singleton() && !k->is_Con() ) {
714    NOT_PRODUCT( set_progress(); )
715    return makecon(t);          // Turn into a constant
716  }
717
718  // Now check for Identities
719  Node *i = k->Identity(this);  // Look for a nearby replacement
720  if( i != k ) {                // Found? Return replacement!
721    NOT_PRODUCT( set_progress(); )
722    return i;
723  }
724
725  // Global Value Numbering
726  i = hash_find_insert(k);      // Insert if new
727  if( i && (i != k) ) {
728    // Return the pre-existing node
729    NOT_PRODUCT( set_progress(); )
730    return i;
731  }
732
733  // Return Idealized original
734  return k;
735}
736
737#ifdef ASSERT
738//------------------------------dead_loop_check--------------------------------
739// Check for a simple dead loop when a data node references itself directly
740// or through another data node, excluding cons and phis.
741void PhaseGVN::dead_loop_check( Node *n ) {
742  // Phi may reference itself in a loop
743  if (n != NULL && !n->is_dead_loop_safe() && !n->is_CFG()) {
744    // Do 2 levels check and only data inputs.
745    bool no_dead_loop = true;
746    uint cnt = n->req();
747    for (uint i = 1; i < cnt && no_dead_loop; i++) {
748      Node *in = n->in(i);
749      if (in == n) {
750        no_dead_loop = false;
751      } else if (in != NULL && !in->is_dead_loop_safe()) {
752        uint icnt = in->req();
753        for (uint j = 1; j < icnt && no_dead_loop; j++) {
754          if (in->in(j) == n || in->in(j) == in)
755            no_dead_loop = false;
756        }
757      }
758    }
759    if (!no_dead_loop) n->dump(3);
760    assert(no_dead_loop, "dead loop detected");
761  }
762}
763#endif
764
765//=============================================================================
766//------------------------------PhaseIterGVN-----------------------------------
767// Initialize hash table to fresh and clean for +VerifyOpto
768PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
769                                                                      _stack(C->unique() >> 1),
770                                                                      _delay_transform(false) {
771}
772
773//------------------------------PhaseIterGVN-----------------------------------
774// Initialize with previous PhaseIterGVN info; used by PhaseCCP
775PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn ) : PhaseGVN(igvn),
776                                                   _worklist( igvn->_worklist ),
777                                                   _stack( igvn->_stack ),
778                                                   _delay_transform(igvn->_delay_transform)
779{
780}
781
782//------------------------------PhaseIterGVN-----------------------------------
783// Initialize with previous PhaseGVN info from Parser
784PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
785                                              _worklist(*C->for_igvn()),
786                                              _stack(C->unique() >> 1),
787                                              _delay_transform(false)
788{
789  uint max;
790
791  // Dead nodes in the hash table inherited from GVN were not treated as
792  // roots during def-use info creation; hence they represent an invisible
793  // use.  Clear them out.
794  max = _table.size();
795  for( uint i = 0; i < max; ++i ) {
796    Node *n = _table.at(i);
797    if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) {
798      if( n->is_top() ) continue;
799      assert( false, "Parse::remove_useless_nodes missed this node");
800      hash_delete(n);
801    }
802  }
803
804  // Any Phis or Regions on the worklist probably had uses that could not
805  // make more progress because the uses were made while the Phis and Regions
806  // were in half-built states.  Put all uses of Phis and Regions on worklist.
807  max = _worklist.size();
808  for( uint j = 0; j < max; j++ ) {
809    Node *n = _worklist.at(j);
810    uint uop = n->Opcode();
811    if( uop == Op_Phi || uop == Op_Region ||
812        n->is_Type() ||
813        n->is_Mem() )
814      add_users_to_worklist(n);
815  }
816}
817
818
819#ifndef PRODUCT
820void PhaseIterGVN::verify_step(Node* n) {
821  _verify_window[_verify_counter % _verify_window_size] = n;
822  ++_verify_counter;
823  ResourceMark rm;
824  ResourceArea *area = Thread::current()->resource_area();
825  VectorSet old_space(area), new_space(area);
826  if (C->unique() < 1000 ||
827      0 == _verify_counter % (C->unique() < 10000 ? 10 : 100)) {
828    ++_verify_full_passes;
829    Node::verify_recur(C->root(), -1, old_space, new_space);
830  }
831  const int verify_depth = 4;
832  for ( int i = 0; i < _verify_window_size; i++ ) {
833    Node* n = _verify_window[i];
834    if ( n == NULL )  continue;
835    if( n->in(0) == NodeSentinel ) {  // xform_idom
836      _verify_window[i] = n->in(1);
837      --i; continue;
838    }
839    // Typical fanout is 1-2, so this call visits about 6 nodes.
840    Node::verify_recur(n, verify_depth, old_space, new_space);
841  }
842}
843#endif
844
845
846//------------------------------init_worklist----------------------------------
847// Initialize worklist for each node.
848void PhaseIterGVN::init_worklist( Node *n ) {
849  if( _worklist.member(n) ) return;
850  _worklist.push(n);
851  uint cnt = n->req();
852  for( uint i =0 ; i < cnt; i++ ) {
853    Node *m = n->in(i);
854    if( m ) init_worklist(m);
855  }
856}
857
858//------------------------------optimize---------------------------------------
859void PhaseIterGVN::optimize() {
860  debug_only(uint num_processed  = 0;);
861#ifndef PRODUCT
862  {
863    _verify_counter = 0;
864    _verify_full_passes = 0;
865    for ( int i = 0; i < _verify_window_size; i++ ) {
866      _verify_window[i] = NULL;
867    }
868  }
869#endif
870
871#ifdef ASSERT
872  Node* prev = NULL;
873  uint rep_cnt = 0;
874#endif
875  uint loop_count = 0;
876
877  // Pull from worklist; transform node;
878  // If node has changed: update edge info and put uses on worklist.
879  while( _worklist.size() ) {
880    if (C->check_node_count(NodeLimitFudgeFactor * 2,
881                            "out of nodes optimizing method")) {
882      return;
883    }
884    Node *n  = _worklist.pop();
885    if (++loop_count >= K * C->live_nodes()) {
886      debug_only(n->dump(4);)
887      assert(false, "infinite loop in PhaseIterGVN::optimize");
888      C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
889      return;
890    }
891#ifdef ASSERT
892    if (n == prev) {
893      if (++rep_cnt > 3) {
894        n->dump(4);
895        assert(false, "loop in Ideal transformation");
896      }
897    } else {
898      rep_cnt = 0;
899    }
900    prev = n;
901#endif
902    if (TraceIterativeGVN && Verbose) {
903      tty->print("  Pop ");
904      NOT_PRODUCT( n->dump(); )
905      debug_only(if( (num_processed++ % 100) == 0 ) _worklist.print_set();)
906    }
907
908    if (n->outcnt() != 0) {
909
910#ifndef PRODUCT
911      uint wlsize = _worklist.size();
912      const Type* oldtype = type_or_null(n);
913#endif //PRODUCT
914
915      Node *nn = transform_old(n);
916
917#ifndef PRODUCT
918      if (TraceIterativeGVN) {
919        const Type* newtype = type_or_null(n);
920        if (nn != n) {
921          // print old node
922          tty->print("< ");
923          if (oldtype != newtype && oldtype != NULL) {
924            oldtype->dump();
925          }
926          do { tty->print("\t"); } while (tty->position() < 16);
927          tty->print("<");
928          n->dump();
929        }
930        if (oldtype != newtype || nn != n) {
931          // print new node and/or new type
932          if (oldtype == NULL) {
933            tty->print("* ");
934          } else if (nn != n) {
935            tty->print("> ");
936          } else {
937            tty->print("= ");
938          }
939          if (newtype == NULL) {
940            tty->print("null");
941          } else {
942            newtype->dump();
943          }
944          do { tty->print("\t"); } while (tty->position() < 16);
945          nn->dump();
946        }
947        if (Verbose && wlsize < _worklist.size()) {
948          tty->print("  Push {");
949          while (wlsize != _worklist.size()) {
950            Node* pushed = _worklist.at(wlsize++);
951            tty->print(" %d", pushed->_idx);
952          }
953          tty->print_cr(" }");
954        }
955      }
956      if( VerifyIterativeGVN && nn != n ) {
957        verify_step((Node*) NULL);  // ignore n, it might be subsumed
958      }
959#endif
960    } else if (!n->is_top()) {
961      remove_dead_node(n);
962    }
963  }
964
965#ifndef PRODUCT
966  C->verify_graph_edges();
967  if( VerifyOpto && allow_progress() ) {
968    // Must turn off allow_progress to enable assert and break recursion
969    C->root()->verify();
970    { // Check if any progress was missed using IterGVN
971      // Def-Use info enables transformations not attempted in wash-pass
972      // e.g. Region/Phi cleanup, ...
973      // Null-check elision -- may not have reached fixpoint
974      //                       do not propagate to dominated nodes
975      ResourceMark rm;
976      PhaseIterGVN igvn2(this,"Verify"); // Fresh and clean!
977      // Fill worklist completely
978      igvn2.init_worklist(C->root());
979
980      igvn2.set_allow_progress(false);
981      igvn2.optimize();
982      igvn2.set_allow_progress(true);
983    }
984  }
985  if ( VerifyIterativeGVN && PrintOpto ) {
986    if ( _verify_counter == _verify_full_passes )
987      tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
988                    _verify_full_passes);
989    else
990      tty->print_cr("VerifyIterativeGVN: %d transforms, %d full verify passes",
991                  _verify_counter, _verify_full_passes);
992  }
993#endif
994}
995
996
997//------------------register_new_node_with_optimizer---------------------------
998// Register a new node with the optimizer.  Update the types array, the def-use
999// info.  Put on worklist.
1000Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) {
1001  set_type_bottom(n);
1002  _worklist.push(n);
1003  if (orig != NULL)  C->copy_node_notes_to(n, orig);
1004  return n;
1005}
1006
1007//------------------------------transform--------------------------------------
1008// Non-recursive: idealize Node 'n' with respect to its inputs and its value
1009Node *PhaseIterGVN::transform( Node *n ) {
1010  if (_delay_transform) {
1011    // Register the node but don't optimize for now
1012    register_new_node_with_optimizer(n);
1013    return n;
1014  }
1015
1016  // If brand new node, make space in type array, and give it a type.
1017  ensure_type_or_null(n);
1018  if (type_or_null(n) == NULL) {
1019    set_type_bottom(n);
1020  }
1021
1022  return transform_old(n);
1023}
1024
1025//------------------------------transform_old----------------------------------
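// IGVN counterpart of PhaseGVN::transform_no_reclaim: the same
// Ideal/Value/Identity/value-numbering steps, except that 'n' is first removed
// from the hash table, replacements are spliced in with subsume_node, and the
// users of anything that changed are pushed back onto the worklist.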
1026Node *PhaseIterGVN::transform_old( Node *n ) {
1027#ifndef PRODUCT
1028  debug_only(uint loop_count = 0;);
1029  set_transforms();
1030#endif
1031  // Remove 'n' from hash table in case it gets modified
1032  _table.hash_delete(n);
1033  if( VerifyIterativeGVN ) {
1034   assert( !_table.find_index(n->_idx), "found duplicate entry in table");
1035  }
1036
1037  // Apply the Ideal call in a loop until it no longer applies
1038  Node *k = n;
1039  DEBUG_ONLY(dead_loop_check(k);)
1040  DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
1041  Node *i = k->Ideal(this, /*can_reshape=*/true);
1042  assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1043#ifndef PRODUCT
1044  if( VerifyIterativeGVN )
1045    verify_step(k);
1046  if( i && VerifyOpto ) {
1047    if( !allow_progress() ) {
1048      if (i->is_Add() && i->outcnt() == 1) {
1049        // Switched input to left side because this is the only use
1050      } else if( i->is_If() && (i->in(0) == NULL) ) {
1051        // This IF is dead because it is dominated by an equivalent IF.  When the
1052        // dominating IF changed, the info was not propagated sparsely to 'this'.
1053        // Propagating this info further will spuriously identify other
1054        // progress.
1055        return i;
1056      } else
1057        set_progress();
1058    } else
1059      set_progress();
1060  }
1061#endif
1062
1063  while( i ) {
1064#ifndef PRODUCT
1065    debug_only( if( loop_count >= K ) i->dump(4); )
1066    assert(loop_count < K, "infinite loop in PhaseIterGVN::transform");
1067    debug_only( loop_count++; )
1068#endif
1069    assert((i->_idx >= k->_idx) || i->is_top(), "Idealize should return new nodes, use Identity to return old nodes");
1070    // Made a change; put users of original Node on worklist
1071    add_users_to_worklist( k );
1072    // Replacing root of transform tree?
1073    if( k != i ) {
1074      // Make users of old Node now use new.
1075      subsume_node( k, i );
1076      k = i;
1077    }
1078    DEBUG_ONLY(dead_loop_check(k);)
1079    // Try idealizing again
1080    DEBUG_ONLY(is_new = (k->outcnt() == 0);)
1081    i = k->Ideal(this, /*can_reshape=*/true);
1082    assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
1083#ifndef PRODUCT
1084    if( VerifyIterativeGVN )
1085      verify_step(k);
1086    if( i && VerifyOpto ) set_progress();
1087#endif
1088  }
1089
1090  // If brand new node, make space in type array.
1091  ensure_type_or_null(k);
1092
1093  // See what kind of values 'k' takes on at runtime
1094  const Type *t = k->Value(this);
1095  assert(t != NULL, "value sanity");
1096
1097  // Since I just called 'Value' to compute the set of run-time values
1098  // for this Node, and 'Value' is non-local (and therefore expensive) I'll
1099  // cache Value.  Later requests for the local phase->type of this Node can
1100  // use the cached Value instead of suffering with 'bottom_type'.
1101  if (t != type_or_null(k)) {
1102    NOT_PRODUCT( set_progress(); )
1103    NOT_PRODUCT( inc_new_values();)
1104    set_type(k, t);
1105    // If k is a TypeNode, capture any more-precise type permanently into Node
1106    k->raise_bottom_type(t);
1107    // Move users of node to worklist
1108    add_users_to_worklist( k );
1109  }
1110
1111  // If 'k' computes a constant, replace it with a constant
1112  if( t->singleton() && !k->is_Con() ) {
1113    NOT_PRODUCT( set_progress(); )
1114    Node *con = makecon(t);     // Make a constant
1115    add_users_to_worklist( k );
1116    subsume_node( k, con );     // Everybody using k now uses con
1117    return con;
1118  }
1119
1120  // Now check for Identities
1121  i = k->Identity(this);        // Look for a nearby replacement
1122  if( i != k ) {                // Found? Return replacement!
1123    NOT_PRODUCT( set_progress(); )
1124    add_users_to_worklist( k );
1125    subsume_node( k, i );       // Everybody using k now uses i
1126    return i;
1127  }
1128
1129  // Global Value Numbering
1130  i = hash_find_insert(k);      // Check for pre-existing node
1131  if( i && (i != k) ) {
1132    // Return the pre-existing node if it isn't dead
1133    NOT_PRODUCT( set_progress(); )
1134    add_users_to_worklist( k );
1135    subsume_node( k, i );       // Everybody using k now uses i
1136    return i;
1137  }
1138
1139  // Return Idealized original
1140  return k;
1141}
1142
1143//---------------------------------saturate------------------------------------
1144const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type,
1145                                   const Type* limit_type) const {
1146  return new_type->narrow(old_type);
1147}
1148
1149//------------------------------remove_globally_dead_node----------------------
1150// Kill a globally dead Node.  All uses are also globally dead and are
1151// aggressively trimmed.
1152void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
1153  enum DeleteProgress {
1154    PROCESS_INPUTS,
1155    PROCESS_OUTPUTS
1156  };
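  // The deletion walk is driven by an explicit node stack with a two-state
  // cursor (inputs first, then outputs), presumably so that arbitrarily deep
  // chains of dead nodes cannot overflow the native call stack.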
1157  assert(_stack.is_empty(), "not empty");
1158  _stack.push(dead, PROCESS_INPUTS);
1159
1160  while (_stack.is_nonempty()) {
1161    dead = _stack.node();
1162    uint progress_state = _stack.index();
1163    assert(dead != C->root(), "killing root, eh?");
1164    assert(!dead->is_top(), "add check for top when pushing");
1165    NOT_PRODUCT( set_progress(); )
1166    if (progress_state == PROCESS_INPUTS) {
1167      // After following inputs, continue to outputs
1168      _stack.set_index(PROCESS_OUTPUTS);
1169      if (!dead->is_Con()) { // Don't kill cons but uses
1170        bool recurse = false;
1171        // Remove from hash table
1172        _table.hash_delete( dead );
1173        // Smash all inputs to 'dead', isolating him completely
1174        for (uint i = 0; i < dead->req(); i++) {
1175          Node *in = dead->in(i);
1176          if (in != NULL && in != C->top()) {  // Points to something?
1177            int nrep = dead->replace_edge(in, NULL);  // Kill edges
1178            assert((nrep > 0), "sanity");
1179            if (in->outcnt() == 0) { // Made input go dead?
1180              _stack.push(in, PROCESS_INPUTS); // Recursively remove
1181              recurse = true;
1182            } else if (in->outcnt() == 1 &&
1183                       in->has_special_unique_user()) {
1184              _worklist.push(in->unique_out());
1185            } else if (in->outcnt() <= 2 && dead->is_Phi()) {
1186              if (in->Opcode() == Op_Region) {
1187                _worklist.push(in);
1188              } else if (in->is_Store()) {
1189                DUIterator_Fast imax, i = in->fast_outs(imax);
1190                _worklist.push(in->fast_out(i));
1191                i++;
1192                if (in->outcnt() == 2) {
1193                  _worklist.push(in->fast_out(i));
1194                  i++;
1195                }
1196                assert(!(i < imax), "sanity");
1197              }
1198            }
1199            if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
1200                in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
1201              // A Load that directly follows an InitializeNode is
1202              // going away. The Stores that follow are candidates
1203              // again to be captured by the InitializeNode.
1204              for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
1205                Node *n = in->fast_out(j);
1206                if (n->is_Store()) {
1207                  _worklist.push(n);
1208                }
1209              }
1210            }
1211          } // if (in != NULL && in != C->top())
1212        } // for (uint i = 0; i < dead->req(); i++)
1213        if (recurse) {
1214          continue;
1215        }
1216      } // if (!dead->is_Con())
1217    } // if (progress_state == PROCESS_INPUTS)
1218
1219    // Aggressively kill globally dead uses
1220    // (Rather than pushing all the outs at once, we push one at a time,
1221    // plus the parent to resume later, because of the indefinite number
1222    // of edge deletions per loop trip.)
1223    if (dead->outcnt() > 0) {
1224      // Recursively remove output edges
1225      _stack.push(dead->raw_out(0), PROCESS_INPUTS);
1226    } else {
1227      // Finished disconnecting all input and output edges.
1228      _stack.pop();
1229      // Remove dead node from iterative worklist
1230      _worklist.remove(dead);
1231      // A constant node that has no out-edges and only one in-edge from
1232      // root is usually dead. However, the reshaping walk sometimes makes
1233      // it reachable again by adding use edges. So, we will NOT count Con nodes
1234      // as dead to be conservative about the dead node count at any
1235      // given time.
1236      if (!dead->is_Con()) {
1237        C->record_dead_node(dead->_idx);
1238      }
1239      if (dead->is_macro()) {
1240        C->remove_macro_node(dead);
1241      }
1242      if (dead->is_expensive()) {
1243        C->remove_expensive_node(dead);
1244      }
1245    }
1246  } // while (_stack.is_nonempty())
1247}
1248
1249//------------------------------subsume_node-----------------------------------
1250// Remove users from node 'old' and add them to node 'nn'.
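// Each rewired user is pulled out of the GVN hash table before its edges are
// changed and re-inserted afterwards (if it was present), since editing an
// input changes the user's hash.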
1251void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
1252  assert( old != hash_find(old), "should already been removed" );
1253  assert( old != C->top(), "cannot subsume top node");
1254  // Copy debug or profile information to the new version:
1255  C->copy_node_notes_to(nn, old);
1256  // Move users of node 'old' to node 'nn'
1257  for (DUIterator_Last imin, i = old->last_outs(imin); i >= imin; ) {
1258    Node* use = old->last_out(i);  // for each use...
1259    // use might need re-hashing (but it won't if it's a new node)
1260    bool is_in_table = _table.hash_delete( use );
1261    // Update use-def info as well
1262    // We remove all occurrences of old within use->in,
1263    // so as to avoid rehashing any node more than once.
1264    // The hash table probe swamps any outer loop overhead.
1265    uint num_edges = 0;
1266    for (uint jmax = use->len(), j = 0; j < jmax; j++) {
1267      if (use->in(j) == old) {
1268        use->set_req(j, nn);
1269        ++num_edges;
1270      }
1271    }
1272    // Insert into GVN hash table if unique
1273    // If a duplicate, 'use' will be cleaned up when pulled off worklist
1274    if( is_in_table ) {
1275      hash_find_insert(use);
1276    }
1277    i -= num_edges;    // we deleted 1 or more copies of this edge
1278  }
1279
1280  // Smash all inputs to 'old', isolating him completely
1281  Node *temp = new (C) Node(1);
1282  temp->init_req(0,nn);     // Add a use to nn to prevent him from dying
1283  remove_dead_node( old );
1284  temp->del_req(0);         // Yank bogus edge
1285#ifndef PRODUCT
1286  if( VerifyIterativeGVN ) {
1287    for ( int i = 0; i < _verify_window_size; i++ ) {
1288      if ( _verify_window[i] == old )
1289        _verify_window[i] = nn;
1290    }
1291  }
1292#endif
1293  _worklist.remove(temp);   // this can be necessary
1294  temp->destruct();         // reuse the _idx of this little guy
1295}
1296
1297//------------------------------add_users_to_worklist--------------------------
1298void PhaseIterGVN::add_users_to_worklist0( Node *n ) {
1299  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1300    _worklist.push(n->fast_out(i));  // Push on worklist
1301  }
1302}
1303
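// Besides the direct users (add_users_to_worklist0), push selected
// second-order users whose Ideal()/Value() can now make progress: projections
// of MultiNodes and Stores, the Catch after a call whose receiver changed,
// If/Region users behind a Cmp, Phis behind a Cast, RShift users of an LShift,
// memory users of an AddP, and Stores that depend on an Allocate/Initialize.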
1304void PhaseIterGVN::add_users_to_worklist( Node *n ) {
1305  add_users_to_worklist0(n);
1306
1307  // Move users of node to worklist
1308  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1309    Node* use = n->fast_out(i); // Get use
1310
1311    if( use->is_Multi() ||      // Multi-definer?  Push projs on worklist
1312        use->is_Store() )       // Enable store/load same address
1313      add_users_to_worklist0(use);
1314
1315    // If we changed the receiver type of a call, we need to revisit
1316    // the Catch following the call.  It's looking for a non-NULL
1317    // receiver to know when to enable the regular fall-through path
1318    // in addition to the NullPtrException path.
1319    if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) {
1320      Node* p = use->as_CallDynamicJava()->proj_out(TypeFunc::Control);
1321      if (p != NULL) {
1322        add_users_to_worklist0(p);
1323      }
1324    }
1325
1326    if( use->is_Cmp() ) {       // Enable CMP/BOOL optimization
1327      add_users_to_worklist(use); // Put Bool on worklist
1328      // Look for the 'is_x2logic' pattern: "x ? 0 : 1" and put the
1329      // phi merging either 0 or 1 onto the worklist
1330      if (use->outcnt() > 0) {
1331        Node* bol = use->raw_out(0);
1332        if (bol->outcnt() > 0) {
1333          Node* iff = bol->raw_out(0);
1334          if (iff->outcnt() == 2) {
1335            Node* ifproj0 = iff->raw_out(0);
1336            Node* ifproj1 = iff->raw_out(1);
1337            if (ifproj0->outcnt() > 0 && ifproj1->outcnt() > 0) {
1338              Node* region0 = ifproj0->raw_out(0);
1339              Node* region1 = ifproj1->raw_out(0);
1340              if( region0 == region1 )
1341                add_users_to_worklist0(region0);
1342            }
1343          }
1344        }
1345      }
1346    }
1347
1348    uint use_op = use->Opcode();
1349    // If changed Cast input, check Phi users for simple cycles
1350    if( use->is_ConstraintCast() || use->is_CheckCastPP() ) {
1351      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1352        Node* u = use->fast_out(i2);
1353        if (u->is_Phi())
1354          _worklist.push(u);
1355      }
1356    }
1357    // If changed LShift inputs, check RShift users for useless sign-ext
1358    if( use_op == Op_LShiftI ) {
1359      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1360        Node* u = use->fast_out(i2);
1361        if (u->Opcode() == Op_RShiftI)
1362          _worklist.push(u);
1363      }
1364    }
1365    // If changed AddP inputs, check Stores for loop invariant
1366    if( use_op == Op_AddP ) {
1367      for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
1368        Node* u = use->fast_out(i2);
1369        if (u->is_Mem())
1370          _worklist.push(u);
1371      }
1372    }
1373    // If changed initialization activity, check dependent Stores
1374    if (use_op == Op_Allocate || use_op == Op_AllocateArray) {
1375      InitializeNode* init = use->as_Allocate()->initialization();
1376      if (init != NULL) {
1377        Node* imem = init->proj_out(TypeFunc::Memory);
1378        if (imem != NULL)  add_users_to_worklist0(imem);
1379      }
1380    }
1381    if (use_op == Op_Initialize) {
1382      Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory);
1383      if (imem != NULL)  add_users_to_worklist0(imem);
1384    }
1385  }
1386}
1387
1388/**
1389 * Remove the speculative part of all types that we know of
1390 */
1391void PhaseIterGVN::remove_speculative_types()  {
1392  assert(UseTypeSpeculation, "speculation is off");
1393  for (uint i = 0; i < _types.Size(); i++)  {
1394    const Type* t = _types.fast_lookup(i);
1395    if (t != NULL && t->isa_oopptr()) {
1396      const TypeOopPtr* to = t->is_oopptr();
1397      _types.map(i, to->remove_speculative());
1398    }
1399  }
1400}
1401
1402//=============================================================================
1403#ifndef PRODUCT
1404uint PhaseCCP::_total_invokes   = 0;
1405uint PhaseCCP::_total_constants = 0;
1406#endif
1407//------------------------------PhaseCCP---------------------------------------
1408// Conditional Constant Propagation, ala Wegman & Zadeck
1409PhaseCCP::PhaseCCP( PhaseIterGVN *igvn ) : PhaseIterGVN(igvn) {
1410  NOT_PRODUCT( clear_constants(); )
1411  assert( _worklist.size() == 0, "" );
1412  // Clear out _nodes from IterGVN.  Must be clear to transform call.
1413  _nodes.clear();               // Clear out from IterGVN
1414  analyze();
1415}
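// _nodes must be empty at this point because PhaseCCP::transform (below)
// reuses it as the map from an old node's _idx to its already-transformed
// replacement.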
1416
1417#ifndef PRODUCT
1418//------------------------------~PhaseCCP--------------------------------------
1419PhaseCCP::~PhaseCCP() {
1420  inc_invokes();
1421  _total_constants += count_constants();
1422}
1423#endif
1424
1425
1426#ifdef ASSERT
1427static bool ccp_type_widens(const Type* t, const Type* t0) {
1428  assert(t->meet(t0) == t, "Not monotonic");
1429  switch (t->base() == t0->base() ? t->base() : Type::Top) {
1430  case Type::Int:
1431    assert(t0->isa_int()->_widen <= t->isa_int()->_widen, "widen increases");
1432    break;
1433  case Type::Long:
1434    assert(t0->isa_long()->_widen <= t->isa_long()->_widen, "widen increases");
1435    break;
1436  }
1437  return true;
1438}
1439#endif //ASSERT
1440
1441//------------------------------analyze----------------------------------------
1442void PhaseCCP::analyze() {
1443  // Initialize all types to TOP, optimistic analysis
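  // Types start at TOP (the most optimistic assumption) and can only move
  // down the lattice as Value() results propagate; ccp_type_widens asserts
  // this monotonicity.  The worklist drains once no node's type changes.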
1444  for (int i = C->unique() - 1; i >= 0; i--)  {
1445    _types.map(i,Type::TOP);
1446  }
1447
1448  // Push root onto worklist
1449  Unique_Node_List worklist;
1450  worklist.push(C->root());
1451
1452  // Pull from worklist; compute new value; push changes out.
1453  // This loop is the meat of CCP.
1454  while( worklist.size() ) {
1455    Node *n = worklist.pop();
1456    const Type *t = n->Value(this);
1457    if (t != type(n)) {
1458      assert(ccp_type_widens(t, type(n)), "ccp type must widen");
1459#ifndef PRODUCT
1460      if( TracePhaseCCP ) {
1461        t->dump();
1462        do { tty->print("\t"); } while (tty->position() < 16);
1463        n->dump();
1464      }
1465#endif
1466      set_type(n, t);
1467      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1468        Node* m = n->fast_out(i);   // Get user
1469        if( m->is_Region() ) {  // New path to Region?  Must recheck Phis too
1470          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1471            Node* p = m->fast_out(i2); // Propagate changes to uses
1472            if( p->bottom_type() != type(p) ) // If not already bottomed out
1473              worklist.push(p); // Propagate change to user
1474          }
1475        }
1476        // If we changed the receiver type of a call, we need to revisit
1477        // the Catch node following the call.  It's looking for a non-NULL
1478        // receiver to know when to enable the regular fall-through path
1479        // in addition to the NullPtrException path.
1480        if (m->is_Call()) {
1481          for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
1482            Node* p = m->fast_out(i2);  // Propagate changes to uses
1483            if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1)
1484              worklist.push(p->unique_out());
1485          }
1486        }
1487        if( m->bottom_type() != type(m) ) // If not already bottomed out
1488          worklist.push(m);     // Propagate change to user
1489      }
1490    }
1491  }
1492}
1493
1494//------------------------------do_transform-----------------------------------
1495// Top level driver for the recursive transformer
1496void PhaseCCP::do_transform() {
1497  // Transform the whole graph starting at the root and reinstall the root.
1498  C->set_root( transform(C->root())->as_Root() );
1499  assert( C->top(),  "missing TOP node" );
1500  assert( C->root(), "missing root" );
1501}
1502
1503//------------------------------transform--------------------------------------
1504// Given a Node, reduce it to a constant node if CCP proved a constant type,
1505// then walk its inputs and do the same for each of them.
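// The walk is iterative (a GrowableArray used as an explicit stack) rather
// than recursive, so arbitrarily deep graphs cannot overflow the C stack.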
1506Node *PhaseCCP::transform( Node *n ) {
1507  Node *new_node = _nodes[n->_idx]; // Check for transformed node
1508  if( new_node != NULL )
1509    return new_node;                // Been there, done that, return old answer
1510  new_node = transform_once(n);     // Check for constant
1511  _nodes.map( n->_idx, new_node );  // Flag as having been cloned
1512
1513  // Allocate stack of size C->unique()/2 to avoid frequent realloc
1514  GrowableArray <Node *> trstack(C->unique() >> 1);
1515
1516  trstack.push(new_node);           // Process children of cloned node
1517  while ( trstack.is_nonempty() ) {
1518    Node *clone = trstack.pop();
1519    uint cnt = clone->req();
1520    for( uint i = 0; i < cnt; i++ ) {          // For all inputs do
1521      Node *input = clone->in(i);
1522      if( input != NULL ) {                    // Ignore NULLs
1523        Node *new_input = _nodes[input->_idx]; // Check for cloned input node
1524        if( new_input == NULL ) {
1525          new_input = transform_once(input);   // Check for constant
1526          _nodes.map( input->_idx, new_input );// Flag as having been cloned
1527          trstack.push(new_input);
1528        }
1529        assert( new_input == clone->in(i), "insanity check");
1530      }
1531    }
1532  }
1533  return new_node;
1534}
1535
1536
1537//------------------------------transform_once---------------------------------
1538// For PhaseCCP, transformation is IDENTITY unless Node computed a constant.
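// A singleton type means CCP proved the node always produces the same value,
// so the node is replaced by a shared constant node (makecon).  An unreachable
// Region is cut loose from its self-reference and its dead phis are folded to
// top.  Non-constant nodes keep their identity; a few opcodes are pushed onto
// the worklist so the follow-up igvn pass revisits them.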
1539Node *PhaseCCP::transform_once( Node *n ) {
1540  const Type *t = type(n);
1541  // Constant?  Use constant Node instead
1542  if( t->singleton() ) {
1543    Node *nn = n;               // Default is to return the original constant
1544    if( t == Type::TOP ) {
1545      // cache my top node on the Compile instance
1546      if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) {
1547        C->set_cached_top_node( ConNode::make(C, Type::TOP) );
1548        set_type(C->top(), Type::TOP);
1549      }
1550      nn = C->top();
1551    }
1552    if( !n->is_Con() ) {
1553      if( t != Type::TOP ) {
1554        nn = makecon(t);        // ConNode::make(t);
1555        NOT_PRODUCT( inc_constants(); )
1556      } else if( n->is_Region() ) { // Unreachable region
1557        // Note: nn == C->top()
1558        n->set_req(0, NULL);        // Cut self-reference
1559        // Eagerly remove dead phis to avoid creating copies of them.
1560        for (DUIterator i = n->outs(); n->has_out(i); i++) {
1561          Node* m = n->out(i);
1562          if( m->is_Phi() ) {
1563            assert(type(m) == Type::TOP, "Unreachable region should not have live phis.");
1564            replace_node(m, nn);
1565            --i; // deleted this phi; rescan starting with next position
1566          }
1567        }
1568      }
1569      replace_node(n,nn);       // Update DefUse edges for new constant
1570    }
1571    return nn;
1572  }
1573
1574  // If x is a TypeNode, capture any more-precise type permanently into Node
1575  if (t != n->bottom_type()) {
1576    hash_delete(n);             // changing bottom type may force a rehash
1577    n->raise_bottom_type(t);
1578    _worklist.push(n);          // n re-enters the hash table via the worklist
1579  }
1580
1581  // Idealize graph using DU info.  Must clone() into new-space.
1582  // DU info is generally used to show profitability, progress or safety
1583  // (but generally not needed for correctness).
1584  Node *nn = n->Ideal_DU_postCCP(this);
1585
1586  // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks
1587  switch( n->Opcode() ) {
1588  case Op_FastLock:      // Revisit FastLocks for lock coarsening
1589  case Op_If:
1590  case Op_CountedLoopEnd:
1591  case Op_Region:
1592  case Op_Loop:
1593  case Op_CountedLoop:
1594  case Op_Conv2B:
1595  case Op_Opaque1:
1596  case Op_Opaque2:
1597    _worklist.push(n);
1598    break;
1599  default:
1600    break;
1601  }
1602  if( nn ) {
1603    _worklist.push(n);
1604    // Put users of 'n' onto worklist for second igvn transform
1605    add_users_to_worklist(n);
1606    return nn;
1607  }
1608
1609  return  n;
1610}
1611
1612//---------------------------------saturate------------------------------------
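// Note: widen() may jump a slowly-growing range toward the limit type in one
// step (e.g. an int range that keeps creeping upward during analysis), and
// filter() then clips the widened result so it never goes past limit_type.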
1613const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
1614                               const Type* limit_type) const {
1615  const Type* wide_type = new_type->widen(old_type, limit_type);
1616  if (wide_type != new_type) {          // did we widen?
1617    // If so, we may have widened beyond the limit type.  Clip it back down.
1618    new_type = wide_type->filter(limit_type);
1619  }
1620  return new_type;
1621}
1622
1623//------------------------------print_statistics-------------------------------
1624#ifndef PRODUCT
1625void PhaseCCP::print_statistics() {
1626  tty->print_cr("CCP: invoked %d times, constants found: %d", _total_invokes, _total_constants);
1627}
1628#endif
1629
1630
1631//=============================================================================
1632#ifndef PRODUCT
1633uint PhasePeephole::_total_peepholes = 0;
1634#endif
1635//------------------------------PhasePeephole----------------------------------
1636// Machine-specific peephole optimization pass
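// Runs after register allocation: do_transform() walks each basic block, asks
// every MachNode for an architecture-specific peephole replacement, and
// splices the replacement into the block in place of the matched instructions.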
1637PhasePeephole::PhasePeephole( PhaseRegAlloc *regalloc, PhaseCFG &cfg )
1638  : PhaseTransform(Peephole), _regalloc(regalloc), _cfg(cfg) {
1639  NOT_PRODUCT( clear_peepholes(); )
1640}
1641
1642#ifndef PRODUCT
1643//------------------------------~PhasePeephole---------------------------------
1644PhasePeephole::~PhasePeephole() {
1645  _total_peepholes += count_peepholes();
1646}
1647#endif
1648
1649//------------------------------transform--------------------------------------
1650Node *PhasePeephole::transform( Node *n ) {
1651  ShouldNotCallThis();
1652  return NULL;
1653}
1654
1655//------------------------------do_transform-----------------------------------
1656void PhasePeephole::do_transform() {
1657  bool method_name_not_printed = true;
1658
1659  // Examine each basic block
1660  for (uint block_number = 1; block_number < _cfg.number_of_blocks(); ++block_number) {
1661    Block* block = _cfg.get_block(block_number);
1662    bool block_not_printed = true;
1663
1664    // and each instruction within a block
1665    uint end_index = block->number_of_nodes();
1666    // block->end_idx() not valid after PhaseRegAlloc
1667    for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
1668      Node     *n = block->get_node(instruction_index);
1669      if( n->is_Mach() ) {
1670        MachNode *m = n->as_Mach();
1671        int deleted_count = 0;
1672        // check for peephole opportunities
1673        MachNode *m2 = m->peephole( block, instruction_index, _regalloc, deleted_count, C );
1674        if( m2 != NULL ) {
1675#ifndef PRODUCT
1676          if( PrintOptoPeephole ) {
1677            // Print method, first time only
1678            if( C->method() && method_name_not_printed ) {
1679              C->method()->print_short_name(); tty->cr();
1680              method_name_not_printed = false;
1681            }
1682            // Print this block
1683            if( Verbose && block_not_printed) {
1684              tty->print_cr("in block");
1685              block->dump();
1686              block_not_printed = false;
1687            }
1688            // Print instructions being deleted
1689            for( int i = (deleted_count - 1); i >= 0; --i ) {
1690              block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
1691            }
1692            tty->print_cr("replaced with");
1693            // Print new instruction
1694            m2->format(_regalloc);
1695            tty->print("\n\n");
1696          }
1697#endif
1698          // Remove old nodes from basic block and update instruction_index
1699          // (old nodes still exist and may have edges pointing to them
1700          //  as register allocation info is stored in the allocator using
1701          //  the node index to live range mappings.)
1702          uint safe_instruction_index = (instruction_index - deleted_count);
1703          for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
1704            block->remove_node( instruction_index );
1705          }
1706          // install new node after safe_instruction_index
1707          block->insert_node(m2, safe_instruction_index + 1);
1708          end_index = block->number_of_nodes() - 1; // Recompute new block size
1709          NOT_PRODUCT( inc_peepholes(); )
1710        }
1711      }
1712    }
1713  }
1714}
1715
1716//------------------------------print_statistics-------------------------------
1717#ifndef PRODUCT
1718void PhasePeephole::print_statistics() {
1719  tty->print_cr("Peephole: peephole rules applied: %d",  _total_peepholes);
1720}
1721#endif
1722
1723
1724//=============================================================================
1725//------------------------------set_req_X--------------------------------------
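// Replace input i with n while keeping the igvn worklist in sync: if the old
// input may now be dead or newly simplifiable (few remaining users, Stores,
// Regions), it and/or its users are pushed so igvn revisits them.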
1726void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) {
1727  assert( is_not_dead(n), "cannot use dead node");
1728  assert( igvn->hash_find(this) != this, "Need to remove from hash before changing edges" );
1729  Node *old = in(i);
1730  set_req(i, n);
1731
1732  // old goes dead?
1733  if( old ) {
1734    switch (old->outcnt()) {
1735    case 0:
1736      // Put into the worklist to kill later. We do not kill it now because the
1737      // recursive kill will delete the current node (this) if a dead loop exists
1738      if (!old->is_top())
1739        igvn->_worklist.push( old );
1740      break;
1741    case 1:
1742      if( old->is_Store() || old->has_special_unique_user() )
1743        igvn->add_users_to_worklist( old );
1744      break;
1745    case 2:
1746      if( old->is_Store() )
1747        igvn->add_users_to_worklist( old );
1748      if( old->Opcode() == Op_Region )
1749        igvn->_worklist.push(old);
1750      break;
1751    case 3:
1752      if( old->Opcode() == Op_Region ) {
1753        igvn->_worklist.push(old);
1754        igvn->add_users_to_worklist( old );
1755      }
1756      break;
1757    default:
1758      break;
1759    }
1760  }
1761
1762}
1763
1764//-------------------------------replace_by-----------------------------------
1765// Using def-use info, replace one node with another.  Follow the def-use info
1766// to all users of the OLD node.  Then make all uses point to the NEW node.
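// Precedence edges (input slots at or beyond req()) are redirected with
// set_prec(); the backwards DUIterator_Last walk copes with a use that
// referenced this node through several input slots.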
1767void Node::replace_by(Node *new_node) {
1768  assert(!is_top(), "top node has no DU info");
1769  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; ) {
1770    Node* use = last_out(i);
1771    uint uses_found = 0;
1772    for (uint j = 0; j < use->len(); j++) {
1773      if (use->in(j) == this) {
1774        if (j < use->req())
1775          use->set_req(j, new_node);
1776        else  use->set_prec(j, new_node);
1777        uses_found++;
1778      }
1779    }
1780    i -= uses_found;    // we deleted 1 or more copies of this edge
1781  }
1782}
1783
1784//=============================================================================
1785//-----------------------------------------------------------------------------
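// Grow the type table so index i is valid: the capacity doubles until it
// covers i, and the newly added slots are zero-filled so lookups of unmapped
// indices return NULL.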
1786void Type_Array::grow( uint i ) {
1787  if( !_max ) {
1788    _max = 1;
1789    _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) );
1790    _types[0] = NULL;
1791  }
1792  uint old = _max;
1793  while( i >= _max ) _max <<= 1;        // Double to fit
1794  _types = (const Type**)_a->Arealloc( _types, old*sizeof(Type*),_max*sizeof(Type*));
1795  memset( &_types[old], 0, (_max-old)*sizeof(Type*) );
1796}
1797
1798//------------------------------dump-------------------------------------------
1799#ifndef PRODUCT
1800void Type_Array::dump() const {
1801  uint max = Size();
1802  for( uint i = 0; i < max; i++ ) {
1803    if( _types[i] != NULL ) {
1804      tty->print("  %d\t== ", i); _types[i]->dump(); tty->cr();
1805    }
1806  }
1807}
1808#endif
1809