buildOopMap.cpp revision 2273:1d1603768966
/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/phase.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif

// The functions in this file build OopMaps after all scheduling is done.
//
// OopMaps contain a list of all registers and stack-slots containing oops (so
// they can be updated by GC).  OopMaps also contain a list of derived-pointer
// base-pointer pairs.  When the base is moved, the derived pointer moves to
// follow it.  Finally, any registers holding callee-save values are also
// recorded.  These might contain oops, but only the caller knows.
//
// BuildOopMaps implements a simple forward reaching-defs solution.  At each
// GC point we'll have the reaching-def Nodes.  If the reaching Nodes are
// typed as pointers (no offset), then they are oops.  Pointers+offsets are
// derived pointers, and bases can be found from them.  Finally, we'll also
// track reaching callee-save values.  Note that a copy of a callee-save value
// "kills" its source, so that only 1 copy of a callee-save value is alive at
// a time.
//
// We run a simple bitvector liveness pass to help trim out dead oops.  Due to
// irreducible loops, we can have a reaching def of an oop that only reaches
// along one path and no way to know if it's valid or not on the other path.
// The bitvectors are quite dense and the liveness pass is fast.
//
// At GC points, we consult this information to build OopMaps.  All reaching
// defs typed as oops are added to the OopMap.  Only 1 instance of a
// callee-save register can be recorded.  For derived pointers, we'll have to
// find and record the register holding the base.
//
// The reaching-defs computation is a simple 1-pass worklist approach.  I tried
// a clever breadth-first approach but it was worse (showed O(n^2) in the
// pick-next-block code).
//
// The relevant data is kept in a struct of arrays (it could just as well be
// an array of structs, but the struct-of-arrays is generally a little more
// efficient).  The arrays are indexed by register number (including
// stack-slots as registers) and so are bounded by 200 to 300 elements in
// practice.  One array maps a register to its reaching def Node (or NULL for
// conflict/dead).  The other array maps to a callee-saved register or
// OptoReg::Bad for not-callee-saved.
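//
// A tiny illustration of that struct-of-arrays layout (register names are
// hypothetical, purely for the example): after flowing a block that contains
// the save-on-entry projection for RBX and a later instruction defining an
// oop into RAX, we would expect roughly
//   _defs[<RAX>]  == the oop-defining Node      _callees[<RAX>] == OptoReg::Bad
//   _defs[<RBX>]  == the save-on-entry Proj     _callees[<RBX>] == <RBX>
// where <RAX> and <RBX> stand for the corresponding OptoReg::Name numbers.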


//------------------------------OopFlow----------------------------------------
// Structure to pass around
struct OopFlow : public ResourceObj {
  short *_callees;              // Array mapping register to callee-saved
  Node **_defs;                 // array mapping register to reaching def
                                // or NULL if dead/conflict
  // OopFlow structs, when not being actively modified, describe the _end_ of
  // this block.
  Block *_b;                    // Block for this struct
  OopFlow *_next;               // Next free OopFlow
                                // on the free list, or NULL
  Compile* C;

  OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs),
    _b(NULL), _next(NULL), C(c) { }

  // Given reaching-defs for this block start, compute it for this block end
  void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );

  // Merge these two OopFlows into the 'this' pointer.
  void merge( OopFlow *flow, int max_reg );

  // Copy a 'flow' over an existing flow
  void clone( OopFlow *flow, int max_size);

  // Make a new OopFlow from scratch
  static OopFlow *make( Arena *A, int max_size, Compile* C );

  // Build an oopmap from the current flow info
  OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
};

//------------------------------compute_reach----------------------------------
// Given reaching-defs for this block start, compute it for this block end
void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {

  for( uint i=0; i<_b->_nodes.size(); i++ ) {
    Node *n = _b->_nodes[i];

    if( n->jvms() ) {           // Build an OopMap here?
      JVMState *jvms = n->jvms();
      // no map needed for leaf calls
      if( n->is_MachSafePoint() && !n->is_MachCallLeaf() ) {
        int *live = (int*) (*safehash)[n];
        assert( live, "must find live" );
        n->as_MachSafePoint()->set_oop_map( build_oop_map(n,max_reg,regalloc, live) );
      }
    }

    // Assign new reaching def's.
    // Note that I padded the _defs and _callees arrays so it's legal
    // to index at _defs[OptoReg::Bad].
    OptoReg::Name first = regalloc->get_reg_first(n);
    OptoReg::Name second = regalloc->get_reg_second(n);
    _defs[first] = n;
    _defs[second] = n;

    // Pass callee-save info around copies
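    // For example (a sketch; the register and slot names are hypothetical): a
    // MachSpillCopy moving a save-on-entry register such as RBX into a stack
    // slot clears the callee-save tag at RBX's entry and installs it at the
    // slot's entry, so at most one location at a time is credited with the
    // saved value -- the "copy kills its source" rule from the file header.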
    int idx = n->is_Copy();
    if( idx ) {                 // Copies move callee-save info
      OptoReg::Name old_first = regalloc->get_reg_first(n->in(idx));
      OptoReg::Name old_second = regalloc->get_reg_second(n->in(idx));
      int tmp_first = _callees[old_first];
      int tmp_second = _callees[old_second];
      _callees[old_first] = OptoReg::Bad; // callee-save is moved, dead in old location
      _callees[old_second] = OptoReg::Bad;
      _callees[first] = tmp_first;
      _callees[second] = tmp_second;
    } else if( n->is_Phi() ) {  // Phis do not mod callee-saves
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(1))], "" );
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(n->req()-1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(n->req()-1))], "" );
    } else {
      _callees[first] = OptoReg::Bad; // No longer holding a callee-save value
      _callees[second] = OptoReg::Bad;

      // Find base case for callee saves
      if( n->is_Proj() && n->in(0)->is_Start() ) {
        if( OptoReg::is_reg(first) &&
            regalloc->_matcher.is_save_on_entry(first) )
          _callees[first] = first;
        if( OptoReg::is_reg(second) &&
            regalloc->_matcher.is_save_on_entry(second) )
          _callees[second] = second;
      }
    }
  }
}

//------------------------------merge------------------------------------------
// Merge the given flow into the 'this' flow
void OopFlow::merge( OopFlow *flow, int max_reg ) {
  assert( _b == NULL, "merging into a happy flow" );
  assert( flow->_b, "this flow is still alive" );
  assert( flow != this, "no self flow" );

  // Do the merge.  If there are any differences, drop to 'bottom' which
  // is OptoReg::Bad or NULL depending.
  for( int i=0; i<max_reg; i++ ) {
    // Merge the callee-save's
    if( _callees[i] != flow->_callees[i] )
      _callees[i] = OptoReg::Bad;
    // Merge the reaching defs
    if( _defs[i] != flow->_defs[i] )
      _defs[i] = NULL;
  }

}

//------------------------------clone------------------------------------------
void OopFlow::clone( OopFlow *flow, int max_size ) {
  _b = flow->_b;
  memcpy( _callees, flow->_callees, sizeof(short)*max_size);
  memcpy( _defs   , flow->_defs   , sizeof(Node*)*max_size);
}

//------------------------------make-------------------------------------------
OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
  short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
  Node **defs    = NEW_ARENA_ARRAY(A,Node*,max_size+1);
  debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
  OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C);
  assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
  assert( &flow->_defs   [OptoReg::Bad] == defs   , "Ok to index at OptoReg::Bad" );
  return flow;
}

//------------------------------bit twiddlers----------------------------------
static int get_live_bit( int *live, int reg ) {
  return live[reg>>LogBitsPerInt] &   (1<<(reg&(BitsPerInt-1))); }
static void set_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] |=  (1<<(reg&(BitsPerInt-1))); }
static void clr_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); }
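
// A quick worked example of the bit math above, assuming the usual 32-bit ints
// (BitsPerInt == 32, LogBitsPerInt == 5): register number 37 lands in word
// live[37 >> 5] == live[1] at bit position 37 & 31 == 5, so
// set_live_bit(live, 37) performs live[1] |= (1 << 5).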

//------------------------------build_oop_map----------------------------------
// Build an oopmap from the current flow info
OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
  int framesize = regalloc->_framesize;
  int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP);
  debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
              memset(dup_check,0,OptoReg::stack0()) );

  OopMap *omap = new OopMap( framesize,  max_inarg_slot );
  MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : NULL;
  JVMState* jvms = n->jvms();

  // For all registers do...
  for( int reg=0; reg<max_reg; reg++ ) {
    if( get_live_bit(live,reg) == 0 )
      continue;                 // Ignore if not live

    // %%% C2 can use 2 OptoRegs when the physical register is only one 64-bit
    // register; in that case we'll get a non-concrete register for the second
    // half.  We only need to tell the map the register once!
    //
    // However, for the moment we disable this change and leave things as they
    // were.

    VMReg r = OptoReg::as_VMReg(OptoReg::Name(reg), framesize, max_inarg_slot);
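
    // (Roughly: OptoReg names below OptoReg::stack0() denote machine
    // registers, while names at or above it denote stack slots, which
    // as_VMReg translates into frame offsets using framesize and
    // max_inarg_slot.)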

    if (false && r->is_reg() && !r->is_concrete()) {
      continue;
    }

    // See if dead (no reaching def).
    Node *def = _defs[reg];     // Get reaching def
    assert( def, "since live better have reaching def" );

    // Classify the reaching def as oop, derived, callee-save, dead, or other
    const Type *t = def->bottom_type();
    if( t->isa_oop_ptr() ) {    // Oop or derived?
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
#ifdef _LP64
      // 64-bit pointers record oop-ishness on 2 aligned adjacent registers.
      // Make sure both are recorded from the same reaching def, but do not
      // put both into the oopmap.
      if( (reg&1) == 1 ) {      // High half of oop-pair?
        assert( _defs[reg-1] == _defs[reg], "both halves from same reaching def" );
        continue;               // Do not record high parts in oopmap
      }
#endif

      // Check for a legal reg name in the oopMap and bailout if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        regalloc->C->record_method_not_compilable("illegal oopMap register name");
        continue;
      }
      if( t->is_ptr()->_offset == 0 ) { // Not derived?
        if( mcall ) {
          // Outgoing argument GC mask responsibility belongs to the callee,
          // not the caller.  Inspect the inputs to the call, to see if
          // this live-range is one of them.
          uint cnt = mcall->tf()->domain()->cnt();
          uint j;
          for( j = TypeFunc::Parms; j < cnt; j++)
            if( mcall->in(j) == def )
              break;            // reaching def is an argument oop
          if( j < cnt )         // arg oops don't go in GC map
            continue;           // Continue on to the next register
        }
        omap->set_oop(r);
      } else {                  // Else it's derived.
        // Find the base of the derived value.
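        // (The derived/base information was appended to the safepoint by the
        // register allocator as pairs of extra edges starting at
        // jvms->oopoff(): input i is a derived pointer and input i+1 is its
        // base, which is why the scans below step by 2 and the base is
        // fetched as n->in(i+1).)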
        uint i;
        // Fast, common case, scan
        for( i = jvms->oopoff(); i < n->req(); i+=2 )
          if( n->in(i) == def ) break; // Common case
        if( i == n->req() ) {   // Missed, try a more generous scan
          // Scan again, but this time peek through copies
          for( i = jvms->oopoff(); i < n->req(); i+=2 ) {
            Node *m = n->in(i); // Get initial derived value
            while( 1 ) {
              Node *d = def;    // Get initial reaching def
              while( 1 ) {      // Follow copies of reaching def to end
                if( m == d ) goto found; // breaks 3 loops
                int idx = d->is_Copy();
                if( !idx ) break;
                d = d->in(idx);     // Link through copy
              }
              int idx = m->is_Copy();
              if( !idx ) break;
              m = m->in(idx);
            }
          }
          guarantee( 0, "must find derived/base pair" );
        }
      found: ;
        Node *base = n->in(i+1); // Base is other half of pair
        int breg = regalloc->get_reg_first(base);
        VMReg b = OptoReg::as_VMReg(OptoReg::Name(breg), framesize, max_inarg_slot);

        // I record liveness at safepoints BEFORE I make the inputs
        // live.  This is because argument oops are NOT live at a
        // safepoint (or at least they cannot appear in the oopmap).
        // Thus bases of base/derived pairs might not be in the
        // liveness data but they need to appear in the oopmap.
        if( get_live_bit(live,breg) == 0 ) {// Not live?
          // Flag it, so next derived pointer won't re-insert into oopmap
          set_live_bit(live,breg);
          // Already missed our turn?
          if( breg < reg ) {
            if (b->is_stack() || b->is_concrete() || true ) {
              omap->set_oop( b);
            }
          }
        }
        if (b->is_stack() || b->is_concrete() || true ) {
          omap->set_derived_oop( r, b);
        }
      }

    } else if( t->isa_narrowoop() ) {
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
      // Check for a legal reg name in the oopMap and bailout if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        regalloc->C->record_method_not_compilable("illegal oopMap register name");
        continue;
      }
      if( mcall ) {
        // Outgoing argument GC mask responsibility belongs to the callee,
        // not the caller.  Inspect the inputs to the call, to see if
        // this live-range is one of them.
        uint cnt = mcall->tf()->domain()->cnt();
        uint j;
        for( j = TypeFunc::Parms; j < cnt; j++)
          if( mcall->in(j) == def )
            break;            // reaching def is an argument oop
        if( j < cnt )         // arg oops don't go in GC map
          continue;           // Continue on to the next register
      }
      omap->set_narrowoop(r);
    } else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
      // It's a callee-save value
      assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
      debug_only( dup_check[_callees[reg]]=1; )
      VMReg callee = OptoReg::as_VMReg(OptoReg::Name(_callees[reg]));
      if ( callee->is_concrete() || true ) {
        omap->set_callee_saved( r, callee);
      }

    } else {
      // Other - some reaching non-oop value
      omap->set_value( r);
#ifdef ASSERT
      if( t->isa_rawptr() && C->cfg()->_raw_oops.member(def) ) {
        def->dump();
        n->dump();
        assert(false, "there should be an oop in OopMap instead of a live raw oop at safepoint");
      }
#endif
    }

  }

#ifdef ASSERT
  /* Nice, Intel-only assert
  int cnt_callee_saves=0;
  int reg2 = 0;
  while (OptoReg::is_reg(reg2)) {
    if( dup_check[reg2] != 0) cnt_callee_saves++;
    assert( cnt_callee_saves==3 || cnt_callee_saves==5, "missed some callee-save" );
    reg2++;
  }
  */
#endif

#ifdef ASSERT
  for( OopMapStream oms1(omap, OopMapValue::derived_oop_value); !oms1.is_done(); oms1.next()) {
    OopMapValue omv1 = oms1.current();
    bool found = false;
    for( OopMapStream oms2(omap,OopMapValue::oop_value); !oms2.is_done(); oms2.next()) {
      if( omv1.content_reg() == oms2.current().reg() ) {
        found = true;
        break;
      }
    }
    assert( found, "derived with no base in oopmap" );
  }
#endif

  return omap;
}

//------------------------------do_liveness------------------------------------
// Compute backwards liveness on registers
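//
// In dataflow terms this is the standard backwards liveness computation,
// iterated to a fixed point by the worklist below:
//   live_out(b) = union of live_in(s) over all successors s of b
//   live_in(b)  = GEN(b) | (live_out(b) & ~KILL(b))
// where KILL/GEN are applied node by node (registers a node defines vs.
// registers its inputs use), walking each block from bottom to top.  The
// per-block result is stored at &live[b->_pre_order * max_reg_ints].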
static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *worklist, int max_reg_ints, Arena *A, Dict *safehash ) {
  int *live = NEW_ARENA_ARRAY(A, int, (cfg->_num_blocks+1) * max_reg_ints);
  int *tmp_live = &live[cfg->_num_blocks * max_reg_ints];
  Node *root = cfg->C->root();
  // On CISC platforms, get the node representing the stack pointer that regalloc
  // used for spills
  Node *fp = NodeSentinel;
  if (UseCISCSpill && root->req() > 1) {
    fp = root->in(1)->in(TypeFunc::FramePtr);
  }
  memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
  // Push preds onto worklist
  for( uint i=1; i<root->req(); i++ )
    worklist->push(cfg->_bbs[root->in(i)->_idx]);

  // ZKM.jar includes tiny infinite loops which are unreached from below.
  // If we missed any blocks, we'll retry here after pushing all missed
  // blocks on the worklist.  Normally this outer loop never trips more
  // than once.
  while( 1 ) {

    while( worklist->size() ) { // Standard worklist algorithm
      Block *b = worklist->rpop();

      // Copy first successor into my tmp_live space
      int s0num = b->_succs[0]->_pre_order;
      int *t = &live[s0num*max_reg_ints];
      for( int i=0; i<max_reg_ints; i++ )
        tmp_live[i] = t[i];

      // OR in the remaining live registers
      for( uint j=1; j<b->_num_succs; j++ ) {
        uint sjnum = b->_succs[j]->_pre_order;
        int *t = &live[sjnum*max_reg_ints];
        for( int i=0; i<max_reg_ints; i++ )
          tmp_live[i] |= t[i];
      }

      // Now walk tmp_live up the block backwards, computing live
      for( int k=b->_nodes.size()-1; k>=0; k-- ) {
        Node *n = b->_nodes[k];
        // KILL def'd bits
        int first = regalloc->get_reg_first(n);
        int second = regalloc->get_reg_second(n);
        if( OptoReg::is_valid(first) ) clr_live_bit(tmp_live,first);
        if( OptoReg::is_valid(second) ) clr_live_bit(tmp_live,second);

        MachNode *m = n->is_Mach() ? n->as_Mach() : NULL;

        // Check if m is potentially a CISC alternate instruction (i.e., possibly
        // synthesized by RegAlloc from a conventional instruction and a
        // spilled input)
        bool is_cisc_alternate = false;
        if (UseCISCSpill && m) {
          is_cisc_alternate = m->is_cisc_alternate();
        }

        // GEN use'd bits
        for( uint l=1; l<n->req(); l++ ) {
          Node *def = n->in(l);
          assert(def != 0, "input edge required");
          int first = regalloc->get_reg_first(def);
          int second = regalloc->get_reg_second(def);
          if( OptoReg::is_valid(first) ) set_live_bit(tmp_live,first);
          if( OptoReg::is_valid(second) ) set_live_bit(tmp_live,second);
          // If we use the stack pointer in a cisc-alternative instruction,
          // check for use as a memory operand.  Then reconstruct the RegName
          // for this stack location, and set the appropriate bit in the
          // live vector (see bug 4987749).
          if (is_cisc_alternate && def == fp) {
            const TypePtr *adr_type = NULL;
            intptr_t offset;
            const Node* base = m->get_base_and_disp(offset, adr_type);
            if (base == NodeSentinel) {
              // The MachNode has multiple memory inputs. We are unable to reason
              // about these, but are presuming (with trepidation) that none of
              // them are oops. This can be fixed by making get_base_and_disp()
              // look at a specific input instead of all inputs.
              assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input");
            } else if (base != fp || offset == Type::OffsetBot) {
              // Do nothing: the fp operand is either not from a memory use
              // (base == NULL) OR the fp is used in a non-memory context
              // (base is some other register) OR the offset is not constant,
              // so it is not a stack slot.
            } else {
              assert(offset >= 0, "unexpected negative offset");
              offset -= (offset % jintSize);  // count the whole word
              int stack_reg = regalloc->offset2reg(offset);
              if (OptoReg::is_stack(stack_reg)) {
                set_live_bit(tmp_live, stack_reg);
              } else {
                assert(false, "stack_reg not on stack?");
              }
            }
          }
        }

        if( n->jvms() ) {       // Record liveness at safepoint

          // The placement of this stanza means inputs to calls are
          // considered live at the callsite's OopMap.  Argument oops are
          // hence live, but NOT included in the oopmap.  See cutout in
          // build_oop_map.  Debug oops are live (and in OopMap).
          int *n_live = NEW_ARENA_ARRAY(A, int, max_reg_ints);
          for( int l=0; l<max_reg_ints; l++ )
            n_live[l] = tmp_live[l];
          safehash->Insert(n,n_live);
        }

      }

      // Now at block top, see if we have any changes.  If so, propagate
      // to prior blocks.
      int *old_live = &live[b->_pre_order*max_reg_ints];
      int l;
      for( l=0; l<max_reg_ints; l++ )
        if( tmp_live[l] != old_live[l] )
          break;
      if( l<max_reg_ints ) {     // Change!
        // Copy in new value
        for( l=0; l<max_reg_ints; l++ )
          old_live[l] = tmp_live[l];
        // Push preds onto worklist
        for( l=1; l<(int)b->num_preds(); l++ )
          worklist->push(cfg->_bbs[b->pred(l)->_idx]);
      }
    }

    // Scan for any missing safepoints.  Happens with infinite loops
    // a la ZKM.jar
    uint i;
    for( i=1; i<cfg->_num_blocks; i++ ) {
      Block *b = cfg->_blocks[i];
      uint j;
      for( j=1; j<b->_nodes.size(); j++ )
        if( b->_nodes[j]->jvms() &&
            (*safehash)[b->_nodes[j]] == NULL )
           break;
      if( j<b->_nodes.size() ) break;
    }
    if( i == cfg->_num_blocks )
      break;                    // Got 'em all
#ifndef PRODUCT
    if( PrintOpto && Verbose )
      tty->print_cr("retripping live calc");
#endif
    // Force the issue (expensively): recheck everybody
    for( i=1; i<cfg->_num_blocks; i++ )
      worklist->push(cfg->_blocks[i]);
  }

}

//------------------------------BuildOopMaps-----------------------------------
// Collect GC mask info - where are all the OOPs?
void Compile::BuildOopMaps() {
  NOT_PRODUCT( TracePhase t3("bldOopMaps", &_t_buildOopMaps, TimeCompiler); )
  // Can't resource-mark because I need to leave all those OopMaps around,
  // or else I need to resource-mark some arena other than the default.
  // ResourceMark rm;              // Reclaim all OopFlows when done
  int max_reg = _regalloc->_max_reg; // Current array extent

  Arena *A = Thread::current()->resource_area();
  Block_List worklist;          // Worklist of pending blocks

  int max_reg_ints = round_to(max_reg, BitsPerInt)>>LogBitsPerInt;
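  // (For example, if the allocator used 250 registers plus stack slots, then
  // assuming 32-bit ints this is round_to(250, 32) >> 5 == 8 ints of liveness
  // bits per block.)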
  Dict *safehash = NULL;        // Used for assert only
  // Compute a backwards liveness per register.  Needs a bitarray of
  // #blocks x (#registers, rounded up to ints)
  safehash = new Dict(cmpkey,hashkey,A);
  do_liveness( _regalloc, _cfg, &worklist, max_reg_ints, A, safehash );
  OopFlow *free_list = NULL;    // Free, unused

  // Array mapping blocks to completed oopflows
  OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->_num_blocks);
  memset( flows, 0, _cfg->_num_blocks*sizeof(OopFlow*) );


  // Do the first block 'by hand' to prime the worklist
  Block *entry = _cfg->_blocks[1];
  OopFlow *rootflow = OopFlow::make(A,max_reg,this);
  // Initialize to 'bottom' (not 'top')
  memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
  memset( rootflow->_defs   ,            0, max_reg*sizeof(Node*) );
  flows[entry->_pre_order] = rootflow;

  // Flow the entry block and push its successors onto the worklist
  rootflow->_b = entry;
  rootflow->compute_reach( _regalloc, max_reg, safehash );
  for( uint i=0; i<entry->_num_succs; i++ )
    worklist.push(entry->_succs[i]);

  // Now worklist contains blocks which have some, but perhaps not all,
  // predecessors visited.
  while( worklist.size() ) {
    // Scan for a block with all predecessors visited, or any random slob
    // otherwise.  All-preds-visited order allows me to recycle OopFlow
    // structures rapidly and cut down on the memory footprint.
    // Note: not all predecessors might be visited yet (must happen for
    // irreducible loops).  This is OK, since every live value must have the
    // SAME reaching def for the block, so any reaching def is OK.
    uint i;

    Block *b = worklist.pop();
    // Ignore root block
    if( b == _cfg->_broot ) continue;
    // Block is already done?  Happens if a block has several predecessors,
    // in which case it can get on the worklist more than once.
    if( flows[b->_pre_order] ) continue;

    // If this block has a visited predecessor AND that predecessor has this
    // last block as its only undone child, we can move the OopFlow from the
    // pred to this block.  Otherwise we have to grab a new OopFlow.
    OopFlow *flow = NULL;       // Flag for finding optimized flow
    Block *pred = (Block*)0xdeadbeef;
    uint j;
    // Scan this block's preds to find a done predecessor
    for( j=1; j<b->num_preds(); j++ ) {
      Block *p = _cfg->_bbs[b->pred(j)->_idx];
      OopFlow *p_flow = flows[p->_pre_order];
      if( p_flow ) {            // Predecessor is done
        assert( p_flow->_b == p, "cross check" );
        pred = p;               // Record some predecessor
        // If all successors of p are done except for 'b', then we can carry
        // p_flow forward to 'b' without copying, otherwise we have to draw
        // from the free_list and clone data.
        uint k;
        for( k=0; k<p->_num_succs; k++ )
          if( !flows[p->_succs[k]->_pre_order] &&
              p->_succs[k] != b )
            break;

        // Either carry-forward the now-unused OopFlow for b's use
        // or draw a new one from the free list
        if( k==p->_num_succs ) {
          flow = p_flow;
          break;                // Found an ideal pred, use him
        }
      }
    }

    if( flow ) {
      // We have an OopFlow that's the last-use of a predecessor.
      // Carry it forward.
    } else {                    // Draw a new OopFlow from the freelist
      if( !free_list )
        free_list = OopFlow::make(A,max_reg,C);
      flow = free_list;
      assert( flow->_b == NULL, "oopFlow is not free" );
      free_list = flow->_next;
      flow->_next = NULL;

      // Copy/clone over the data
      flow->clone(flows[pred->_pre_order], max_reg);
    }

    // Mark flow for block.  Blocks can only be flowed over once,
    // because after the first time they are guarded from entering
    // this code again.
    assert( flow->_b == pred, "have some prior flow" );
    flow->_b = NULL;

    // Now push flow forward
    flows[b->_pre_order] = flow;// Mark flow for this block
    flow->_b = b;
    flow->compute_reach( _regalloc, max_reg, safehash );

    // Now push children onto worklist
    for( i=0; i<b->_num_succs; i++ )
      worklist.push(b->_succs[i]);

  }
}