live.cpp revision 3724:8e47bac5643a
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/callnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/live.hpp"
#include "opto/machnode.hpp"



//=============================================================================
//------------------------------PhaseLive--------------------------------------
// Compute live-in/live-out.  We use a totally incremental algorithm.  The LIVE
// problem is monotonic.  The steady-state solution looks like this: pull a
// block from the worklist.  It has a set of deltas - values which are newly
// live-in from the block.  Push these to the live-out sets of all predecessor
// blocks.  At each predecessor, only the values not already live-out are of
// interest; they are ORed into the predecessor's live-out set.  Those newly
// live-out values are then filtered against what the predecessor defines
// locally.  Leftover bits become the new live-in for the predecessor block,
// and the pred block is put on the worklist.
//   The locally live-in stuff is computed once and added to predecessor
// live-out sets.  This separate computation is done in the outer loop below.
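//
// In classic dataflow terms this is the standard backward liveness system
// (a sketch for orientation, not code from this file):
//
//   live-out(B) = union of live-in(S) over all successors S of B
//   live-in(B)  = use(B) U (live-out(B) - def(B))
//
// solved incrementally: only newly discovered live-in bits (the "delta"
// sets) are pushed back to predecessors, so each bit is propagated across
// a given block boundary at most once.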
PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
}

void PhaseLive::compute(uint maxlrg) {
  _maxlrg   = maxlrg;
  _worklist = new (_arena) Block_List();

  // Init the sparse live arrays.  This data is live on exit from here!
  // The _live info is the live-out info.
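  // (For orientation: the per-block sets below are indexed by pre_order-1,
  //  so block b's live-out set is _live[b->_pre_order-1]; callers such as
  //  the register allocator are assumed to read it through an accessor like
  //  live(b) in live.hpp.)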
  _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet)*_cfg._num_blocks);
  uint i;
  for( i=0; i<_cfg._num_blocks; i++ ) {
    _live[i].initialize(_maxlrg);
  }

  // Init the sparse arrays for delta-sets.
  ResourceMark rm;              // Nuke temp storage on exit

  // Does the memory used by _defs and _deltas get reclaimed?  Does it matter?  TT

  // Array of values defined locally in blocks
  _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg._num_blocks);
  for( i=0; i<_cfg._num_blocks; i++ ) {
    _defs[i].initialize(_maxlrg);
  }

  // Array of delta-set pointers, indexed by block pre_order-1.
  _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg._num_blocks);
  memset( _deltas, 0, sizeof(IndexSet*)* _cfg._num_blocks);
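  // A non-NULL entry in _deltas marks a block with pending delta bits (and,
  // once the block has finished pass 1, that it is on the worklist);
  // freeset() clears the entry when the deltas have been consumed.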

  _free_IndexSet = NULL;

  // Blocks having done pass-1
  VectorSet first_pass(Thread::current()->resource_area());

  // Outer loop: must compute local live-in sets and push into predecessors.
  uint iters = _cfg._num_blocks;        // stat counters
  for( uint j=_cfg._num_blocks; j>0; j-- ) {
    Block *b = _cfg._blocks[j-1];

    // Compute the local live-in set.  Start with any new live-out bits.
    IndexSet *use = getset( b );
    IndexSet *def = &_defs[b->_pre_order-1];
    DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
    uint i;
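    // Walk the block backwards, stopping at the Phis; kill each def from the
    // running use set and add any inputs defined outside this block.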
    for( i=b->_nodes.size(); i>1; i-- ) {
      Node *n = b->_nodes[i-1];
      if( n->is_Phi() ) break;

      uint r = _names[n->_idx];
      assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
      def->insert( r );
      use->remove( r );
      uint cnt = n->req();
      for( uint k=1; k<cnt; k++ ) {
        Node *nk = n->in(k);
        uint nkidx = nk->_idx;
        if( _cfg._bbs[nkidx] != b ) {
          uint u = _names[nkidx];
          use->insert( u );
          DEBUG_ONLY(def_outside->insert( u );)
        }
      }
    }
#ifdef ASSERT
    def_outside->set_next(_free_IndexSet);
    _free_IndexSet = def_outside;     // Drop onto free list
#endif
    // Remove anything defined by Phis and the block start instruction
    for( uint k=i; k>0; k-- ) {
      uint r = _names[b->_nodes[k-1]->_idx];
      def->insert( r );
      use->remove( r );
    }

    // Push these live-in things to predecessors
    for( uint l=1; l<b->num_preds(); l++ ) {
      Block *p = _cfg._bbs[b->pred(l)->_idx];
      add_liveout( p, use, first_pass );

      // PhiNode uses go in the live-out set of prior blocks.
      for( uint k=i; k>0; k-- )
        add_liveout( p, _names[b->_nodes[k-1]->in(l)->_idx], first_pass );
    }
    freeset( b );
    first_pass.set(b->_pre_order);

    // Inner loop: blocks that picked up new live-out values to be propagated
    while( _worklist->size() ) {
      iters++;                  // Stat counter
      Block *b = _worklist->pop();
      IndexSet *delta = getset(b);
      assert( delta->count(), "missing delta set" );

      // Add new live-in bits to the predecessors' live-out sets
      for( uint l=1; l<b->num_preds(); l++ )
        add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );

      freeset(b);
    } // End of while-worklist-not-empty

  } // End of for-all-blocks-outer-loop

  // We explicitly clear all of the IndexSets which we are about to release.
  // This allows us to recycle their internal memory into IndexSet's free list.

  for( i=0; i<_cfg._num_blocks; i++ ) {
    _defs[i].clear();
    if (_deltas[i]) {
      // Is this always true?
      _deltas[i]->clear();
    }
  }
  IndexSet *free = _free_IndexSet;
  while (free != NULL) {
    IndexSet *temp = free;
    free = free->next();
    temp->clear();
  }

}

//------------------------------stats------------------------------------------
#ifndef PRODUCT
void PhaseLive::stats(uint iters) const {
}
#endif

//------------------------------getset-----------------------------------------
// Get an IndexSet for a block.  Return existing one, if any.  Make a new
// empty one if a prior one does not exist.
IndexSet *PhaseLive::getset( Block *p ) {
  IndexSet *delta = _deltas[p->_pre_order-1];
  if( !delta )                  // Not on worklist?
    // Get a free set; flag as being on worklist
    delta = _deltas[p->_pre_order-1] = getfreeset();
  return delta;                 // Return set of new live-out items
}

//------------------------------getfreeset-------------------------------------
// Pull from free list, or allocate.  Internal allocation on the returned set
// is always from thread local storage.
IndexSet *PhaseLive::getfreeset( ) {
  IndexSet *f = _free_IndexSet;
  if( !f ) {
    f = new IndexSet;
//    f->set_arena(Thread::current()->resource_area());
    f->initialize(_maxlrg, Thread::current()->resource_area());
  } else {
    // Pull from free list
    _free_IndexSet = f->next();
  //f->_cnt = 0;                        // Reset to empty
//    f->set_arena(Thread::current()->resource_area());
    f->initialize(_maxlrg, Thread::current()->resource_area());
  }
  return f;
}

//------------------------------freeset----------------------------------------
// Free an IndexSet from a block.
void PhaseLive::freeset( const Block *p ) {
  IndexSet *f = _deltas[p->_pre_order-1];
  f->set_next(_free_IndexSet);
  _free_IndexSet = f;           // Drop onto free list
  _deltas[p->_pre_order-1] = NULL;
}

//------------------------------add_liveout------------------------------------
// Add a live-out value to a given block's live-out set.  If it is new, then
// also add it to the delta set and stick the block on the worklist.
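// Note that a predecessor is pushed onto the worklist only after it has
// completed pass 1 (see first_pass): before that its local def set has not
// been computed yet, and any pending delta bits are picked up when the
// outer loop reaches it ("start with any new live-out bits" above).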
void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
  IndexSet *live = &_live[p->_pre_order-1];
  if( live->insert(r) ) {       // If actually inserted...
    // We extended the live-out set.  See if the value is generated locally.
    // If it is not, then we must extend the live-in set.
    if( !_defs[p->_pre_order-1].member( r ) ) {
      if( !_deltas[p->_pre_order-1] && // Not on worklist?
          first_pass.test(p->_pre_order) )
        _worklist->push(p);     // Actually go on worklist if already 1st pass
      getset(p)->insert(r);
    }
  }
}


//------------------------------add_liveout------------------------------------
// Add a vector of live-out values to a given block's live-out set.
void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
  IndexSet *live = &_live[p->_pre_order-1];
  IndexSet *defs = &_defs[p->_pre_order-1];
  IndexSet *on_worklist = _deltas[p->_pre_order-1];
  IndexSet *delta = on_worklist ? on_worklist : getfreeset();

  IndexSetIterator elements(lo);
  uint r;
  while ((r = elements.next()) != 0) {
    if( live->insert(r) &&      // If actually inserted...
        !defs->member( r ) )    // and not defined locally
      delta->insert(r);         // Then add to live-in set
  }

  if( delta->count() ) {                // If actually added things
    _deltas[p->_pre_order-1] = delta; // Flag as on worklist now
    if( !on_worklist &&         // Not on worklist?
        first_pass.test(p->_pre_order) )
      _worklist->push(p);       // Actually go on worklist if already 1st pass
  } else {                      // Nothing there; just free it
    delta->set_next(_free_IndexSet);
    _free_IndexSet = delta;     // Drop onto free list
  }
}

#ifndef PRODUCT
//------------------------------dump-------------------------------------------
// Dump the live-out set for a block
void PhaseLive::dump( const Block *b ) const {
  tty->print("Block %d: ",b->_pre_order);
  tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
  uint cnt = b->_nodes.size();
  for( uint i=0; i<cnt; i++ ) {
    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
    b->_nodes[i]->dump();
  }
  tty->print("\n");
}

//------------------------------verify_base_ptrs-------------------------------
// Verify that base pointers and derived pointers are still sane.
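// For each safepoint's derived/base pairs (inputs from jvms->oopoff() up,
// alternating derived then base), walk up through copies and spill phis to
// the defining nodes and assert each is an acceptable producer: AddP for a
// derived pointer; a constant, CreateEx, CheckCastPP, LoadP/LoadKlass, etc.
// for a base.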
void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
#ifdef ASSERT
  Unique_Node_List worklist(a);
  for( uint i = 0; i < _cfg._num_blocks; i++ ) {
    Block *b = _cfg._blocks[i];
    for( uint j = b->end_idx() + 1; j > 1; j-- ) {
      Node *n = b->_nodes[j-1];
      if( n->is_Phi() ) break;
      // Found a safepoint?
      if( n->is_MachSafePoint() ) {
        MachSafePointNode *sfpt = n->as_MachSafePoint();
        JVMState* jvms = sfpt->jvms();
        if (jvms != NULL) {
          // Now scan for a live derived pointer
          if (jvms->oopoff() < sfpt->req()) {
            // Check each derived/base pair
            for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
              Node *check = sfpt->in(idx);
              bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
              // search upwards through spills and spill phis for AddP
              worklist.clear();
              worklist.push(check);
              uint k = 0;
              while( k < worklist.size() ) {
                check = worklist.at(k);
                assert(check,"Bad base or derived pointer");
                // See PhaseChaitin::find_base_for_derived() for all cases.
                int isc = check->is_Copy();
                if( isc ) {
                  worklist.push(check->in(isc));
                } else if( check->is_Phi() ) {
                  for (uint m = 1; m < check->req(); m++)
                    worklist.push(check->in(m));
                } else if( check->is_Con() ) {
                  if (is_derived) {
                    // Derived is NULL+offset
                    assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer");
                  } else {
                    assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer");
                    // Base either ConP(NULL) or loadConP
                    if (check->is_Mach()) {
                      assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer");
                    } else {
                      assert(check->Opcode() == Op_ConP &&
                             check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer");
                    }
                  }
                } else if( check->bottom_type()->is_ptr()->_offset == 0 ) {
                  if(check->is_Proj() || (check->is_Mach() &&
                     (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
                      check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
                      check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
                      check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
#ifdef _LP64
                      (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP) ||
                      (UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN) ||
                      (UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass) ||
#endif
                      check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                      check->as_Mach()->ideal_Opcode() == Op_LoadKlass))) {
                    // Valid nodes
                  } else {
                    check->dump();
                    assert(false,"Bad base or derived pointer");
                  }
                } else {
                  assert(is_derived,"Bad base pointer");
                  assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer");
                }
                k++;
                assert(k < 100000,"Derived pointer checking in infinite loop");
              } // End while
            }
          } // End of check for derived pointers
        } // End of check for debug info
      } // End of if found a safepoint
    } // End of forall instructions in block
  } // End of forall blocks
#endif
}

//------------------------------verify-------------------------------------
// Verify that graphs and base pointers are still sane.
void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
#ifdef ASSERT
  if( VerifyOpto || VerifyRegisterAllocator ) {
    _cfg.verify();
    verify_base_ptrs(a);
    if(verify_ifg)
      _ifg->verify(this);
  }
#endif
}

#endif