/*
 * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_lcm.cpp.incl"

//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities.  Basically, find NULL checks
// with suitable memory ops nearby.  Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness.
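// For example, the explicit sequence
//   test  ptr; branch-if-zero <uncommon trap>
//   load  [ptr + 8] -> x
// can usually collapse to just the load: if ptr is NULL the access
// faults in the (unmapped) first page, and the signal handler dispatches
// to the trap path instead of executing the branch.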
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
  // Assume that if a null check is needed for offset 0 then it is always needed.
  // Intel Solaris doesn't support any null checks yet, and no mechanism
  // exists (yet) to set the switches at an os_cpu level.
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
  float f = end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;
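  // (PROB_UNLIKELY_MAG(4) is a 1-in-10^4 likelihood: taking the trap
  // forces a recompile, so the null path must look essentially never
  // taken before we trade the explicit test for a faulting access.)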

  uint bidx = 0;                // Capture index of value into memop
  bool was_store;               // Memory op is a store op

  // Get the successor block for the case that the tested ptr is non-null
  Block* not_null_block;  // this one goes with the proj
  Block* null_block;
  if (_nodes[_nodes.size()-1] == proj) {
    null_block     = _succs[0];
    not_null_block = _succs[1];
  } else {
    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
    not_null_block = _succs[0];
    null_block     = _succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block     = null_block->_succs[0];
  }

  // Search the exception block for an uncommon trap.
  // (See Parse::do_if and Parse::do_ifnull for the reason
  // we need an uncommon trap.  Briefly, we need a way to
  // detect failure of this optimization, as in 6366351.)
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
      Node* nn = null_block->_nodes[i1];
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() ==
          SharedRuntime::uncommon_trap_blob()->instructions_begin()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            // This uncommon trap is sure to recompile, eventually.
            // When that happens, C->too_many_traps will prevent
            // this transformation from happening again.
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      // We did not find an uncommon trap.
      return;
    }
  }
  // Search the successor block for a load or store whose base value is also
  // the tested value.  There may be several.
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    switch( mach->ideal_Opcode() ) {
    case Op_LoadB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
      was_store = true;         // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:
    case Op_AryEq:
      // Not a legit memory op for implicit null check regardless of
      // embedded loads
      continue;
    default:                    // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
      break;
    }
    // check if the offset is not too high for implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        // Narrow oop address doesn't have base, only index
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if offset is beyond page size
        // Cannot reason about it; it is probably not an implicit null exception.
      } else {
        const TypePtr* tptr = base->bottom_type()->is_ptr();
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // adjust if the base itself carries an offset
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if the reference is beyond page size
      }
    }
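    // (Example: with a 4K protected page at address zero, a NULL base
    // guarantees a fault for [reg + 8], but [reg + 0x2000] could land on
    // a mapped page and silently read garbage instead of trapping.)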

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = cfg->_bbs[mach->_idx];
    cb = cb->_idom;             // Always hoist at least 1 block
    if( !was_store ) {          // Stores can be hoisted only one block
      while( cb->_dom_depth > (_dom_depth + 1))
        cb = cb->_idom;         // Hoist loads as far as we want
      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != this ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) vidx = j;
      // Block of memory-op input
      Block *inb = cfg->_bbs[mach->in(j)->_idx];
      Block *b = this;          // Start from null check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = cfg->_bbs[mach->_idx];
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // 'mach' is the faulting memory use we are trying to hoist;
      // an intervening load 'n' found below might block the hoist.
      while( b != this ) {
        uint k;
        for( k = 1; k < b->_nodes.size(); k++ ) {
          Node *n = b->_nodes[k];
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->_nodes.size() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check all paths)
        if( b->num_preds() != 2 ) break;
        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
      }
      if( b != this ) continue;
    }
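    // (Hazard example: hoisting "st [p+8] := v" above "x := ld [q+8]" is
    // wrong if p and q can alias, since the load would then observe the
    // store; hence the walk back to the test block above.)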

    // Make sure this memory op is not already being used for a NullCheck
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate!  Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if( !best ||
        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
      best = mach;
      bidx = vidx;
    }
  }
  // No candidate!
  if( !best ) return;

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = cfg->_bbs[best->_idx];
  old_block->find_remove(best);
  add_inst(best);
  cfg->_bbs.map(best->_idx,this);

  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->_nodes[0])
    best->set_req(0, _nodes[0]);

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->Opcode() == Op_MachProj ) {
      cfg->_bbs[n->_idx]->find_remove(n);
      add_inst(n);
      cfg->_bbs.map(n->_idx,this);
    }
  }

  Compile *C = cfg->C;
  // proj==Op_IfTrue --> ne test; proj==Op_IfFalse --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq.  If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = _nodes[end_idx()+1];
    Node *tmp2 = _nodes[end_idx()+2];
    _nodes.map(end_idx()+1, tmp2);
    _nodes.map(end_idx()+2, tmp1);
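    // Swap the users of tmp1 and tmp2 through a scratch node so neither
    // projection is ever (even transiently) wired as a user of itself:
    // tmp1's users move to tmp, tmp2's to tmp1, then tmp's to tmp2.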
    Node *tmp = new (C, 1) Node(C->top()); // Use a non-NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  _nodes.map(end_idx(),nul_chk);
  cfg->_bbs.map(nul_chk->_idx,this);
  // Redirect users of old_tst to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
    old_tst->set_req(i3, NULL);

  cfg->latency_from_uses(nul_chk);
  cfg->latency_from_uses(best);
}


//------------------------------select-----------------------------------------
// Select a nice fellow from the worklist to schedule next. If there is only
// one choice, then use it. Projections take top priority for correctness
// reasons - if I see a projection, then it is next.  There are a number of
// other special cases, for instructions that consume condition codes, et al.
// These are chosen immediately. Some instructions are required to immediately
// precede the last instruction in the block, and these are taken last. Of the
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the greatest number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
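// Rough priority order, from most to least preferred:
//   1. projections, constant 'Top', CreateEx, CheckCastPP (taken at once);
//   2. nodes whose inputs set the condition codes (must_clone defs);
//   3. everything else, by latency and then by input count;
//   4. MachTemps, and flag-setters still awaiting their branch, last.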
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }

  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist

  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    // Order in worklist is used to break ties.
    // See caller for how this is used to delay scheduling
    // of induction variable increments to after the other
    // uses of the phi are scheduled.
    Node *n = worklist[i];      // Get Node on worklist

    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }

    // Final call in a block must be adjacent to 'catch'
    Node *e = end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;

    // Memory op for an implicit null check has to be at the end of the block
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;

    uint n_choice  = 2;

    // See if this instruction is consumed by a branch. If so, then (as the
    // branch is the last instruction in the basic block) force it to the
    // end of the basic block
    if ( must_clone[iop] ) {
      // See if any use is a branch
      bool found_machif = false;

      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);

        // The use is a conditional branch, make them adjacent
        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
          found_machif = true;
          break;
        }

        // More than this instruction pending for successor to be ready,
        // don't choose this if other opportunities are ready
        if (ready_cnt[use->_idx] > 1)
          n_choice = 1;
      }

      // loop terminated, prefer not to use this instruction
      if (found_machif)
        continue;
    }

    // See if this has a predecessor that is "must_clone", i.e. sets the
    // condition code. If so, choose this first
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }

    // MachTemps should be scheduled last so they are near their uses
    if (n->is_MachTemp()) {
      n_choice = 1;
    }

    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
    uint n_score   = n->req();   // Many inputs get high score to break ties

    // Keep best latency found
    if( choice < n_choice ||
        ( choice == n_choice &&
          ( latency < n_latency ||
            ( latency == n_latency &&
              ( score < n_score ))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;               // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist

  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner

  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}


//------------------------------set_next_call----------------------------------
void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;  // must see all nodes in block that precede call
    if( bbs[m->_idx] == this )
      set_next_call( m, next_call, bbs );
  }
}

//------------------------------needed_for_next_call---------------------------
// Set the flag 'next_call' for each Node that is needed for the next call to
// be scheduled.  This flag lets me bias scheduling so Nodes needed for the
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call.  This prevents me from trying to
// carry lots of stuff live across a call.
void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
  // Find the next control-defining Node in this block
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if( bbs[m->_idx] == this && // Local-block user
        m != this_call &&       // Not self-start node
        m->is_Call() ) {
      call = m;
      break;
    }
  }
  if (call == NULL)  return;    // No next call (e.g., block end is near)
  // Set next-call for all inputs to this call
  set_next_call(call, next_call, bbs);
}

//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->Opcode()==Op_MachProj, "" );
    --ready_cnt[n->_idx];
    assert( !ready_cnt[n->_idx], "" );
    // Schedule next to call
    _nodes.map(node_cnt++, n);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(n, next_call, bbs);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if( bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if( !--ready_cnt[m->_idx] )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
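  // A "fat" projection is one whose register mask can cover many
  // registers at once; the one built below models the call's kill set.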
  MachProjNode *proj = new (matcher.C, 1) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  bbs.map(proj->_idx,this);
  _nodes.insert(node_cnt++, proj);

  // Select the right register save policy.
  const char * save_policy;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee-saved register, since the register map wouldn't know where
  // to find them.  CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them.  Strictly speaking this only needs to be
  // done for oops, since idealreg2debugmask takes care of debug info
  // references, but there is no way to handle oops differently than
  // other pointers as far as the kill mask goes.
  bool exclude_soe = op == Op_CallRuntime;

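  // Save-policy letters come from the matcher (ultimately the AD file);
  // roughly: 'N' is never-save, 'C' is caller-save (killed across the
  // call), 'E' is save-on-entry (callee-save), 'A' is always-save.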
  // Fill in the kill mask for the call
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) {     // Not already defined by the call
      // Save-on-call register?
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }

  return node_cnt;
}


//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
  // Node.  Everything else gets topo-sorted.

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
      for (uint i = 0;i < _nodes.size();i++) {
        tty->print("# ");
        _nodes[i]->fast_dump();
      }
      tty->print_cr("#");
    }
#endif

  // RootNode is already sorted
  if( _nodes.size() == 1 ) return true;

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = _nodes[i];
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj()  && n->in(0) == head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      _nodes.map(i,_nodes[phi_cnt]);
      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt[n->_idx] = local; // Count em up
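      // ready_cnt[n] counts the not-yet-scheduled same-block inputs of
      // n; once it decays to zero, every operand of n has been placed
      // and n can go onto the ready worklist below.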

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Note: Required edges with an index greater than oper_input_base
          // are not supported by the allocator.
          // Note2: Can only depend on unmatched edge being last,
          // can not depend on its absolute position.
          Node *oop_store = n->in(n->req() - 1);
          n->del_req(n->req() - 1);
          n->add_prec(oop_store);
          assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
        }
      }
      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
          n->req() > TypeFunc::Parms ) {
        // MemBarAcquire could be created without Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge > number of
        // edges the last edge will be moved outside of the input edges array
        // and the edge will be lost. This is why this code should be
        // executed only when Precedent (== TypeFunc::Parms) edge is present.
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
    ready_cnt[_nodes[i2]->_idx] = 0;

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = _nodes[i3];       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if( cfg->_bbs[m->_idx] ==this ) // Local-block user
        ready_cnt[m->_idx]--;   // Fix ready count
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = _nodes[i4];
    if( !ready_cnt[m->_idx] ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      for (uint j=0; j<_nodes.size(); j++) {
        Node     *n = _nodes[j];
        int     idx = n->_idx;
        tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
        tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
        tty->print("%4d: %s\n", idx, n->Name());
      }
    }
#endif

  // Pull from worklist and schedule
  while( worklist.size() ) {    // While worklist is not empty

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
    _nodes.map(phi_cnt++,n);    // Schedule him next

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }
    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if( cfg->_bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if( !--ready_cnt[m->_idx] )
        worklist.push(m);
    }
  }

  if( phi_cnt != end_idx() ) {
    // Did not schedule all.  Retry, bail out, or die.
    Compile* C = matcher.C;
    if (C->subsume_loads() && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
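      // (Subsumed loads are folded into their users as memory operands,
      // which can add anti-dependence constraints this scheduler cannot
      // satisfy; retrying without subsumption is the safe fallback.)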
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0;i < _nodes.size();i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->cr();
  }
#endif


  return true;
}

//--------------------------catch_cleanup_fix_all_inputs-----------------------
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
        l--;
      }
    }
  }
}

//------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");

  // The use is some block below the Catch.  Find and return the clone of the def
  // that dominates the use. If there is no clone in a dominating block, then
  // create a phi for the def in a dominating block.

  // Find which successor block dominates this use.  The successor
  // blocks must all be single-entry (from the Catch only; I will have
  // split blocks to make this so), hence they all dominate.
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;

  // Find the successor
  Node *fixup = NULL;

  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;

  if( j == def_blk->_num_succs ) {
    // Block at same level in dom-tree is not a successor.  It needs a
    // PhiNode: the PhiNode's inputs are the cloned defs from each
    // predecessor, and the use is then fixed up to reference the Phi.
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
    }

    // Check to see if the use_blk already has an identical phi inserted.
    // If it exists, it will be at the first position since all uses of a
    // def are processed together.
    Node *phi = use_blk->_nodes[1];
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          // Not a match
          fixup = NULL;
          break;
        }
      }
    }

    // If an existing PhiNode was not found, make a new one.
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->_nodes.insert(1, new_phi);
      bbs.map(new_phi->_idx, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }

  } else {
    // Found the use just below the Catch.  Make it use the clone.
    fixup = use_blk->_nodes[n_clone_idx];
  }

  return fixup;
}

//--------------------------catch_cleanup_intra_block--------------------------
// Fix all input edges in use that reference "def".  The use is in the same
// block as the def and both have been cloned in each successor block.
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {

  // Both the use and def have been cloned. For each successor block,
  // get the clone of the use, and make its input the clone of the def
  // found in that block.

  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    // Get clone in each successor block
    Block *sb = blk->_succs[k];
    Node *clone = sb->_nodes[offset_idx+1];
    assert( clone->Opcode() == use->Opcode(), "" );

    // Make use-clone reference the def-clone
    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
  }
}

//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def".  The use is in a different
// block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  if( !use_blk ) return;        // Can happen if the use is a precedence edge

  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}

//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.
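// (Example: if a spill instruction was scheduled between "call foo" and
// its Catch, the spill must take effect on every outgoing path, normal
// or exceptional, so each successor block gets its own private copy.)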
void Block::call_catch_cleanup(Block_Array &bbs) {

  // End of region to clone
  uint end = end_idx();
  if( !_nodes[end]->is_Catch() ) return;
  // Start of region to clone
  uint beg = end;
  while( _nodes[beg-1]->Opcode() != Op_MachProj ||
        !_nodes[beg-1]->in(0)->is_Call() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  // Range of inserted instructions is [beg, end)
  if( beg == end ) return;

  // Clone along all Catch output paths.  Clone area between the 'beg' and
  // 'end' indices.
  for( uint i = 0; i < _num_succs; i++ ) {
    Block *sb = _succs[i];
    // Clone the entire area; ignoring the edge fixup for now.
    for( uint j = end; j > beg; j-- ) {
      Node *clone = _nodes[j-1]->clone();
      sb->_nodes.insert( 1, clone );
      bbs.map(clone->_idx,sb);
    }
  }


  // Fixup edges.  Check the def-use info per cloned Node
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = _nodes[i2];        // Node that got cloned
    // Need DU safe iterator because of edge manipulation in calls.
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) {// For all users
      Node *use = out->pop();
      Block *buse = bbs[use->_idx];
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (this == buse) {
          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
        }
      }
    } // End for all users

  } // End of for all Nodes in cloned area

  // Remove the now-dead cloned ops
  for(uint i3 = beg; i3 < end; i3++ ) {
    _nodes[beg]->disconnect_inputs(NULL);
    _nodes.remove(beg);
  }

  // If the successor blocks have a CreateEx node, move it back to the top
  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
    Block *sb = _succs[i4];
    uint new_cnt = end - beg;
    // Remove any newly created, but dead, nodes.
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->_nodes[j];
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
        n->disconnect_inputs(NULL);
        sb->_nodes.remove(j);
        new_cnt--;
      }
    }
    // If any newly created nodes remain, move the CreateEx node to the top
    if (new_cnt > 0) {
      Node *cex = sb->_nodes[1+new_cnt];
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->_nodes.remove(1+new_cnt);
        sb->_nodes.insert(1,cex);
      }
    }
  }
}
958