1/*
2 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "compiler/compileLog.hpp"
27#include "memory/allocation.inline.hpp"
28#include "opto/addnode.hpp"
29#include "opto/callnode.hpp"
30#include "opto/castnode.hpp"
31#include "opto/connode.hpp"
32#include "opto/convertnode.hpp"
33#include "opto/divnode.hpp"
34#include "opto/loopnode.hpp"
35#include "opto/mulnode.hpp"
36#include "opto/movenode.hpp"
37#include "opto/opaquenode.hpp"
38#include "opto/rootnode.hpp"
39#include "opto/runtime.hpp"
40#include "opto/subnode.hpp"
41#include "opto/superword.hpp"
42#include "opto/vectornode.hpp"
43
44//------------------------------is_loop_exit-----------------------------------
45// Given an IfNode, return the loop-exiting projection or NULL if both
46// arms remain in the loop.
47Node *IdealLoopTree::is_loop_exit(Node *iff) const {
48  if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
49  PhaseIdealLoop *phase = _phase;
50  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
51  // we need loop unswitching instead of peeling.
52  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
53    return iff->raw_out(0);
54  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
55    return iff->raw_out(1);
56  return NULL;
57}
58
59
60//=============================================================================
61
62
63//------------------------------record_for_igvn----------------------------
64// Put loop body on igvn work list
65void IdealLoopTree::record_for_igvn() {
66  for( uint i = 0; i < _body.size(); i++ ) {
67    Node *n = _body.at(i);
68    _phase->_igvn._worklist.push(n);
69  }
70}
71
72//------------------------------compute_exact_trip_count-----------------------
73// Compute loop trip count if possible. Do not recalculate trip count for
74// split loops (pre-main-post) which have their limits and inits behind Opaque node.
75void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase) {
76  if (!_head->as_Loop()->is_valid_counted_loop()) {
77    return;
78  }
79  CountedLoopNode* cl = _head->as_CountedLoop();
80  // Trip count may become nonexact for iteration split loops since
81  // RCE modifies limits. Note, _trip_count value is not reset since
82  // it is used to limit unrolling of main loop.
83  cl->set_nonexact_trip_count();
84
85  // Loop's test should be part of loop.
86  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
87    return; // Infinite loop
88
89#ifdef ASSERT
90  BoolTest::mask bt = cl->loopexit()->test_trip();
91  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
92         bt == BoolTest::ne, "canonical test is expected");
93#endif
94
95  Node* init_n = cl->init_trip();
96  Node* limit_n = cl->limit();
97  if (init_n != NULL && limit_n != NULL) {
98    // Use longs to avoid integer overflow.
99    int stride_con = cl->stride_con();
100    jlong init_con = phase->_igvn.type(init_n)->is_int()->_lo;
101    jlong limit_con = phase->_igvn.type(limit_n)->is_int()->_hi;
102    int stride_m   = stride_con - (stride_con > 0 ? 1 : -1);
103    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
104    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
105      if (init_n->is_Con() && limit_n->is_Con()) {
106        // Set exact trip count.
107        cl->set_exact_trip_count((uint)trip_count);
108      } else if (cl->unrolled_count() == 1) {
109        // Set maximum trip count before unrolling.
110        cl->set_trip_count((uint)trip_count);
111      }
112    }
113  }
114}
115
116//------------------------------compute_profile_trip_cnt----------------------------
117// Compute loop trip count from profile data as
118//    (backedge_count + loop_exit_count) / loop_exit_count
119void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
120  if (!_head->is_CountedLoop()) {
121    return;
122  }
123  CountedLoopNode* head = _head->as_CountedLoop();
124  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
125    return; // Already computed
126  }
127  float trip_cnt = (float)max_jint; // default is big
128
129  Node* back = head->in(LoopNode::LoopBackControl);
130  while (back != head) {
131    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
132        back->in(0) &&
133        back->in(0)->is_If() &&
134        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
135        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
136      break;
137    }
138    back = phase->idom(back);
139  }
140  if (back != head) {
141    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
142           back->in(0), "if-projection exists");
143    IfNode* back_if = back->in(0)->as_If();
144    float loop_back_cnt = back_if->_fcnt * back_if->_prob;
145
146    // Now compute a loop exit count
147    float loop_exit_cnt = 0.0f;
148    for( uint i = 0; i < _body.size(); i++ ) {
149      Node *n = _body[i];
150      if( n->is_If() ) {
151        IfNode *iff = n->as_If();
152        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
153          Node *exit = is_loop_exit(iff);
154          if( exit ) {
155            float exit_prob = iff->_prob;
156            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
157            if (exit_prob > PROB_MIN) {
158              float exit_cnt = iff->_fcnt * exit_prob;
159              loop_exit_cnt += exit_cnt;
160            }
161          }
162        }
163      }
164    }
165    if (loop_exit_cnt > 0.0f) {
166      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
167    } else {
168      // No exit count so use
169      trip_cnt = loop_back_cnt;
170    }
171  }
172#ifndef PRODUCT
173  if (TraceProfileTripCount) {
174    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f\n", head->_idx, trip_cnt);
175  }
176#endif
177  head->set_profile_trip_cnt(trip_cnt);
178}
179
180//---------------------is_invariant_addition-----------------------------
181// Return nonzero index of invariant operand for an Add or Sub
182// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
183int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
184  int op = n->Opcode();
185  if (op == Op_AddI || op == Op_SubI) {
186    bool in1_invar = this->is_invariant(n->in(1));
187    bool in2_invar = this->is_invariant(n->in(2));
188    if (in1_invar && !in2_invar) return 1;
189    if (!in1_invar && in2_invar) return 2;
190  }
191  return 0;
192}
193
194//---------------------reassociate_add_sub-----------------------------
195// Reassociate invariant add and subtract expressions:
196//
197// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
198// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
199// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
200// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
201// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
202// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
203// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
204// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
205// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
206// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
207// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
208// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
209//
210Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
211  if (!n1->is_Add() && !n1->is_Sub() || n1->outcnt() == 0) return NULL;
212  if (is_invariant(n1)) return NULL;
213  int inv1_idx = is_invariant_addition(n1, phase);
214  if (!inv1_idx) return NULL;
215  // Don't mess with add of constant (igvn moves them to expression tree root.)
216  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
217  Node* inv1 = n1->in(inv1_idx);
218  Node* n2 = n1->in(3 - inv1_idx);
219  int inv2_idx = is_invariant_addition(n2, phase);
220  if (!inv2_idx) return NULL;
221  Node* x    = n2->in(3 - inv2_idx);
222  Node* inv2 = n2->in(inv2_idx);
223
224  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
225  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
226  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
227  if (n1->is_Sub() && inv1_idx == 1) {
228    neg_x    = !neg_x;
229    neg_inv2 = !neg_inv2;
230  }
231  Node* inv1_c = phase->get_ctrl(inv1);
232  Node* inv2_c = phase->get_ctrl(inv2);
233  Node* n_inv1;
234  if (neg_inv1) {
235    Node *zero = phase->_igvn.intcon(0);
236    phase->set_ctrl(zero, phase->C->root());
237    n_inv1 = new SubINode(zero, inv1);
238    phase->register_new_node(n_inv1, inv1_c);
239  } else {
240    n_inv1 = inv1;
241  }
242  Node* inv;
243  if (neg_inv2) {
244    inv = new SubINode(n_inv1, inv2);
245  } else {
246    inv = new AddINode(n_inv1, inv2);
247  }
248  phase->register_new_node(inv, phase->get_early_ctrl(inv));
249
250  Node* addx;
251  if (neg_x) {
252    addx = new SubINode(inv, x);
253  } else {
254    addx = new AddINode(x, inv);
255  }
256  phase->register_new_node(addx, phase->get_ctrl(x));
257  phase->_igvn.replace_node(n1, addx);
258  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
259  _body.yank(n1);
260  return addx;
261}
262
263//---------------------reassociate_invariants-----------------------------
264// Reassociate invariant expressions:
265void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
266  for (int i = _body.size() - 1; i >= 0; i--) {
267    Node *n = _body.at(i);
268    for (int j = 0; j < 5; j++) {
269      Node* nn = reassociate_add_sub(n, phase);
270      if (nn == NULL) break;
271      n = nn; // again
272    };
273  }
274}
275
276//------------------------------policy_peeling---------------------------------
277// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
278// make some loop-invariant test (usually a null-check) happen before the loop.
279bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
280  Node *test = ((IdealLoopTree*)this)->tail();
281  int  body_size = ((IdealLoopTree*)this)->_body.size();
282  // Peeling does loop cloning which can result in O(N^2) node construction
283  if( body_size > 255 /* Prevent overflow for large body_size */
284      || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
285    return false;           // too large to safely clone
286  }
287
288  // check for vectorized loops, any peeling done was already applied
289  if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
290
291  while( test != _head ) {      // Scan till run off top of loop
292    if( test->is_If() ) {       // Test?
293      Node *ctrl = phase->get_ctrl(test->in(1));
294      if (ctrl->is_top())
295        return false;           // Found dead test on live IF?  No peeling!
296      // Standard IF only has one input value to check for loop invariance
297      assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
298      // Condition is not a member of this loop?
299      if( !is_member(phase->get_loop(ctrl)) &&
300          is_loop_exit(test) )
301        return true;            // Found reason to peel!
302    }
303    // Walk up dominators to loop _head looking for test which is
304    // executed on every path thru loop.
305    test = phase->idom(test);
306  }
307  return false;
308}
309
310//------------------------------peeled_dom_test_elim---------------------------
311// If we got the effect of peeling, either by actually peeling or by making
312// a pre-loop which must execute at least once, we can remove all
313// loop-invariant dominated tests in the main body.
314void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
315  bool progress = true;
316  while( progress ) {
317    progress = false;           // Reset for next iteration
318    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
319    Node *test = prev->in(0);
320    while( test != loop->_head ) { // Scan till run off top of loop
321
322      int p_op = prev->Opcode();
323      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
324          test->is_If() &&      // Test?
325          !test->in(1)->is_Con() && // And not already obvious?
326          // Condition is not a member of this loop?
327          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
328        // Walk loop body looking for instances of this test
329        for( uint i = 0; i < loop->_body.size(); i++ ) {
330          Node *n = loop->_body.at(i);
331          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
332            // IfNode was dominated by version in peeled loop body
333            progress = true;
334            dominated_by( old_new[prev->_idx], n );
335          }
336        }
337      }
338      prev = test;
339      test = idom(test);
340    } // End of scan tests in loop
341
342  } // End of while( progress )
343}
344
345//------------------------------do_peeling-------------------------------------
346// Peel the first iteration of the given loop.
347// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
348//         The pre-loop illegally has 2 control users (old & new loops).
349// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
350//         Do this by making the old-loop fall-in edges act as if they came
351//         around the loopback from the prior iteration (follow the old-loop
352//         backedges) and then map to the new peeled iteration.  This leaves
353//         the pre-loop with only 1 user (the new peeled iteration), but the
354//         peeled-loop backedge has 2 users.
355// Step 3: Cut the backedge on the clone (so its not a loop) and remove the
356//         extra backedge user.
357//
358//                   orig
359//
360//                  stmt1
361//                    |
362//                    v
363//              loop predicate
364//                    |
365//                    v
366//                   loop<----+
367//                     |      |
368//                   stmt2    |
369//                     |      |
370//                     v      |
371//                    if      ^
372//                   / \      |
373//                  /   \     |
374//                 v     v    |
375//               false true   |
376//               /       \    |
377//              /         ----+
378//             |
379//             v
380//           exit
381//
382//
383//            after clone loop
384//
385//                   stmt1
386//                     |
387//                     v
388//               loop predicate
389//                 /       \
390//        clone   /         \   orig
391//               /           \
392//              /             \
393//             v               v
394//   +---->loop clone          loop<----+
395//   |      |                    |      |
396//   |    stmt2 clone          stmt2    |
397//   |      |                    |      |
398//   |      v                    v      |
399//   ^      if clone            If      ^
400//   |      / \                / \      |
401//   |     /   \              /   \     |
402//   |    v     v            v     v    |
403//   |    true  false      false true   |
404//   |    /         \      /       \    |
405//   +----           \    /         ----+
406//                    \  /
407//                    1v v2
408//                  region
409//                     |
410//                     v
411//                   exit
412//
413//
414//         after peel and predicate move
415//
416//                   stmt1
417//                    /
418//                   /
419//        clone     /            orig
420//                 /
421//                /              +----------+
422//               /               |          |
423//              /          loop predicate   |
424//             /                 |          |
425//            v                  v          |
426//   TOP-->loop clone          loop<----+   |
427//          |                    |      |   |
428//        stmt2 clone          stmt2    |   |
429//          |                    |      |   ^
430//          v                    v      |   |
431//          if clone            If      ^   |
432//          / \                / \      |   |
433//         /   \              /   \     |   |
434//        v     v            v     v    |   |
435//      true   false      false  true   |   |
436//        |         \      /       \    |   |
437//        |          \    /         ----+   ^
438//        |           \  /                  |
439//        |           1v v2                 |
440//        v         region                  |
441//        |            |                    |
442//        |            v                    |
443//        |          exit                   |
444//        |                                 |
445//        +--------------->-----------------+
446//
447//
448//              final graph
449//
450//                  stmt1
451//                    |
452//                    v
453//                  stmt2 clone
454//                    |
455//                    v
456//                   if clone
457//                  / |
458//                 /  |
459//                v   v
460//            false  true
461//             |      |
462//             |      v
463//             | loop predicate
464//             |      |
465//             |      v
466//             |     loop<----+
467//             |      |       |
468//             |    stmt2     |
469//             |      |       |
470//             |      v       |
471//             v      if      ^
472//             |     /  \     |
473//             |    /    \    |
474//             |   v     v    |
475//             | false  true  |
476//             |  |        \  |
477//             v  v         --+
478//            region
479//              |
480//              v
481//             exit
482//
483void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
484
485  C->set_major_progress();
486  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
487  // 'pre' loop from the main and the 'pre' can no longer have its
488  // iterations adjusted.  Therefore, we need to declare this loop as
489  // no longer a 'main' loop; it will need new pre and post loops before
490  // we can do further RCE.
491#ifndef PRODUCT
492  if (TraceLoopOpts) {
493    tty->print("Peel         ");
494    loop->dump_head();
495  }
496#endif
497  Node* head = loop->_head;
498  bool counted_loop = head->is_CountedLoop();
499  if (counted_loop) {
500    CountedLoopNode *cl = head->as_CountedLoop();
501    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
502    cl->set_trip_count(cl->trip_count() - 1);
503    if (cl->is_main_loop()) {
504      cl->set_normal_loop();
505#ifndef PRODUCT
506      if (PrintOpto && VerifyLoopOptimizations) {
507        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
508        loop->dump_head();
509      }
510#endif
511    }
512  }
513  Node* entry = head->in(LoopNode::EntryControl);
514
515  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
516  //         The pre-loop illegally has 2 control users (old & new loops).
517  clone_loop( loop, old_new, dom_depth(head) );
518
519  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
520  //         Do this by making the old-loop fall-in edges act as if they came
521  //         around the loopback from the prior iteration (follow the old-loop
522  //         backedges) and then map to the new peeled iteration.  This leaves
523  //         the pre-loop with only 1 user (the new peeled iteration), but the
524  //         peeled-loop backedge has 2 users.
525  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
526  _igvn.hash_delete(head);
527  head->set_req(LoopNode::EntryControl, new_entry);
528  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
529    Node* old = head->fast_out(j);
530    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
531      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
532      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
533        // Then loop body backedge value remains the same.
534        new_exit_value = old->in(LoopNode::LoopBackControl);
535      _igvn.hash_delete(old);
536      old->set_req(LoopNode::EntryControl, new_exit_value);
537    }
538  }
539
540
541  // Step 3: Cut the backedge on the clone (so its not a loop) and remove the
542  //         extra backedge user.
543  Node* new_head = old_new[head->_idx];
544  _igvn.hash_delete(new_head);
545  new_head->set_req(LoopNode::LoopBackControl, C->top());
546  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
547    Node* use = new_head->fast_out(j2);
548    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
549      _igvn.hash_delete(use);
550      use->set_req(LoopNode::LoopBackControl, C->top());
551    }
552  }
553
554
555  // Step 4: Correct dom-depth info.  Set to loop-head depth.
556  int dd = dom_depth(head);
557  set_idom(head, head->in(1), dd);
558  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
559    Node *old = loop->_body.at(j3);
560    Node *nnn = old_new[old->_idx];
561    if (!has_ctrl(nnn))
562      set_idom(nnn, idom(nnn), dd-1);
563  }
564
565  // Now force out all loop-invariant dominating tests.  The optimizer
566  // finds some, but we _know_ they are all useless.
567  peeled_dom_test_elim(loop,old_new);
568
569  loop->record_for_igvn();
570}
571
572#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop
573
574//------------------------------policy_maximally_unroll------------------------
575// Calculate exact loop trip count and return true if loop can be maximally
576// unrolled.
577bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
578  CountedLoopNode *cl = _head->as_CountedLoop();
579  assert(cl->is_normal_loop(), "");
580  if (!cl->is_valid_counted_loop())
581    return false; // Malformed counted loop
582
583  if (!cl->has_exact_trip_count()) {
584    // Trip count is not exact.
585    return false;
586  }
587
588  uint trip_count = cl->trip_count();
589  // Note, max_juint is used to indicate unknown trip count.
590  assert(trip_count > 1, "one iteration loop should be optimized out already");
591  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");
592
593  // Real policy: if we maximally unroll, does it get too big?
594  // Allow the unrolled mess to get larger than standard loop
595  // size.  After all, it will no longer be a loop.
596  uint body_size    = _body.size();
597  uint unroll_limit = (uint)LoopUnrollLimit * 4;
598  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
599  if (trip_count > unroll_limit || body_size > unroll_limit) {
600    return false;
601  }
602
603  // Fully unroll a loop with few iterations regardless next
604  // conditions since following loop optimizations will split
605  // such loop anyway (pre-main-post).
606  if (trip_count <= 3)
607    return true;
608
609  // Take into account that after unroll conjoined heads and tails will fold,
610  // otherwise policy_unroll() may allow more unrolling than max unrolling.
611  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
612  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
613  if (body_size != tst_body_size) // Check for int overflow
614    return false;
615  if (new_body_size > unroll_limit ||
616      // Unrolling can result in a large amount of node construction
617      new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
618    return false;
619  }
620
621  // Do not unroll a loop with String intrinsics code.
622  // String intrinsics are large and have loops.
623  for (uint k = 0; k < _body.size(); k++) {
624    Node* n = _body.at(k);
625    switch (n->Opcode()) {
626      case Op_StrComp:
627      case Op_StrEquals:
628      case Op_StrIndexOf:
629      case Op_StrIndexOfChar:
630      case Op_EncodeISOArray:
631      case Op_AryEq:
632      case Op_HasNegatives: {
633        return false;
634      }
635#if INCLUDE_RTM_OPT
636      case Op_FastLock:
637      case Op_FastUnlock: {
638        // Don't unroll RTM locking code because it is large.
639        if (UseRTMLocking) {
640          return false;
641        }
642      }
643#endif
644    } // switch
645  }
646
647  return true; // Do maximally unroll
648}
649
650
651//------------------------------policy_unroll----------------------------------
652// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
653// the loop is a CountedLoop and the body is small enough.
654bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {
655
656  CountedLoopNode *cl = _head->as_CountedLoop();
657  assert(cl->is_normal_loop() || cl->is_main_loop(), "");
658
659  if (!cl->is_valid_counted_loop())
660    return false; // Malformed counted loop
661
662  // Protect against over-unrolling.
663  // After split at least one iteration will be executed in pre-loop.
664  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
665
666  _local_loop_unroll_limit = LoopUnrollLimit;
667  _local_loop_unroll_factor = 4;
668  int future_unroll_ct = cl->unrolled_count() * 2;
669  if (!cl->do_unroll_only()) {
670    if (future_unroll_ct > LoopMaxUnroll) return false;
671  } else {
672    // obey user constraints on vector mapped loops with additional unrolling applied
673    int unroll_constraint = (cl->slp_max_unroll()) ? cl->slp_max_unroll() : 1;
674    if ((future_unroll_ct / unroll_constraint) > LoopMaxUnroll) return false;
675  }
676
677  // Check for initial stride being a small enough constant
678  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
679
680  // Don't unroll if the next round of unrolling would push us
681  // over the expected trip count of the loop.  One is subtracted
682  // from the expected trip count because the pre-loop normally
683  // executes 1 iteration.
684  if (UnrollLimitForProfileCheck > 0 &&
685      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
686      future_unroll_ct        > UnrollLimitForProfileCheck &&
687      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
688    return false;
689  }
690
691  // When unroll count is greater than LoopUnrollMin, don't unroll if:
692  //   the residual iterations are more than 10% of the trip count
693  //   and rounds of "unroll,optimize" are not making significant progress
694  //   Progress defined as current size less than 20% larger than previous size.
695  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
696      future_unroll_ct > LoopUnrollMin &&
697      (future_unroll_ct - 1) * (100 / LoopPercentProfileLimit) > cl->profile_trip_cnt() &&
698      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
699    return false;
700  }
701
702  Node *init_n = cl->init_trip();
703  Node *limit_n = cl->limit();
704  int stride_con = cl->stride_con();
705  // Non-constant bounds.
706  // Protect against over-unrolling when init or/and limit are not constant
707  // (so that trip_count's init value is maxint) but iv range is known.
708  if (init_n   == NULL || !init_n->is_Con()  ||
709      limit_n  == NULL || !limit_n->is_Con()) {
710    Node* phi = cl->phi();
711    if (phi != NULL) {
712      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
713      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
714      int next_stride = stride_con * 2; // stride after this unroll
715      if (next_stride > 0) {
716        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
717            iv_type->_lo + next_stride >  iv_type->_hi) {
718          return false;  // over-unrolling
719        }
720      } else if (next_stride < 0) {
721        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
722            iv_type->_hi + next_stride <  iv_type->_lo) {
723          return false;  // over-unrolling
724        }
725      }
726    }
727  }
728
729  // After unroll limit will be adjusted: new_limit = limit-stride.
730  // Bailout if adjustment overflow.
731  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
732  if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
733      stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
734    return false;  // overflow
735
736  // Adjust body_size to determine if we unroll or not
737  uint body_size = _body.size();
738  // Key test to unroll loop in CRC32 java code
739  int xors_in_loop = 0;
740  // Also count ModL, DivL and MulL which expand mightly
741  for (uint k = 0; k < _body.size(); k++) {
742    Node* n = _body.at(k);
743    switch (n->Opcode()) {
744      case Op_XorI: xors_in_loop++; break; // CRC32 java code
745      case Op_ModL: body_size += 30; break;
746      case Op_DivL: body_size += 30; break;
747      case Op_MulL: body_size += 10; break;
748      case Op_StrComp:
749      case Op_StrEquals:
750      case Op_StrIndexOf:
751      case Op_StrIndexOfChar:
752      case Op_EncodeISOArray:
753      case Op_AryEq:
754      case Op_HasNegatives: {
755        // Do not unroll a loop with String intrinsics code.
756        // String intrinsics are large and have loops.
757        return false;
758      }
759#if INCLUDE_RTM_OPT
760      case Op_FastLock:
761      case Op_FastUnlock: {
762        // Don't unroll RTM locking code because it is large.
763        if (UseRTMLocking) {
764          return false;
765        }
766      }
767#endif
768    } // switch
769  }
770
771  if (UseSuperWord) {
772    if (!cl->is_reduction_loop()) {
773      phase->mark_reductions(this);
774    }
775
776    // Only attempt slp analysis when user controls do not prohibit it
777    if (LoopMaxUnroll > _local_loop_unroll_factor) {
778      // Once policy_slp_analysis succeeds, mark the loop with the
779      // maximal unroll factor so that we minimize analysis passes
780      if (future_unroll_ct >= _local_loop_unroll_factor) {
781        policy_unroll_slp_analysis(cl, phase, future_unroll_ct);
782      }
783    }
784  }
785
786  int slp_max_unroll_factor = cl->slp_max_unroll();
787  if (cl->has_passed_slp()) {
788    if (slp_max_unroll_factor >= future_unroll_ct) return true;
789    // Normal case: loop too big
790    return false;
791  }
792
793  // Check for being too big
794  if (body_size > (uint)_local_loop_unroll_limit) {
795    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
796    // Normal case: loop too big
797    return false;
798  }
799
800  if (cl->do_unroll_only()) {
801    if (TraceSuperWordLoopUnrollAnalysis) {
802      tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)\n", slp_max_unroll_factor, future_unroll_ct);
803    }
804  }
805
806  // Unroll once!  (Each trip will soon do double iterations)
807  return true;
808}
809
810void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct) {
811  // Enable this functionality target by target as needed
812  if (SuperWordLoopUnrollAnalysis) {
813    if (!cl->was_slp_analyzed()) {
814      SuperWord sw(phase);
815      sw.transform_loop(this, false);
816
817      // If the loop is slp canonical analyze it
818      if (sw.early_return() == false) {
819        sw.unrolling_analysis(_local_loop_unroll_factor);
820      }
821    }
822
823    if (cl->has_passed_slp()) {
824      int slp_max_unroll_factor = cl->slp_max_unroll();
825      if (slp_max_unroll_factor >= future_unroll_ct) {
826        int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor;
827        if (new_limit > LoopUnrollLimit) {
828          if (TraceSuperWordLoopUnrollAnalysis) {
829            tty->print_cr("slp analysis unroll=%d, default limit=%d\n", new_limit, _local_loop_unroll_limit);
830          }
831          _local_loop_unroll_limit = new_limit;
832        }
833      }
834    }
835  }
836}
837
838//------------------------------policy_align-----------------------------------
839// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
840// expression that does the alignment.  Note that only one array base can be
841// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
842// if we vectorize short memory ops into longer memory ops, we may want to
843// increase alignment.
844bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
845  return false;
846}
847
848//------------------------------policy_range_check-----------------------------
849// Return TRUE or FALSE if the loop should be range-check-eliminated.
850// Actually we do iteration-splitting, a more powerful form of RCE.
851bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
852  if (!RangeCheckElimination) return false;
853
854  CountedLoopNode *cl = _head->as_CountedLoop();
855  // If we unrolled with no intention of doing RCE and we later
856  // changed our minds, we got no pre-loop.  Either we need to
857  // make a new pre-loop, or we gotta disallow RCE.
858  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
859  Node *trip_counter = cl->phi();
860
861  // check for vectorized loops, some opts are no longer needed
862  if (cl->do_unroll_only()) return false;
863
864  // Check loop body for tests of trip-counter plus loop-invariant vs
865  // loop-invariant.
866  for (uint i = 0; i < _body.size(); i++) {
867    Node *iff = _body[i];
868    if (iff->Opcode() == Op_If ||
869        iff->Opcode() == Op_RangeCheck) { // Test?
870
871      // Comparing trip+off vs limit
872      Node *bol = iff->in(1);
873      if (bol->req() != 2) continue; // dead constant test
874      if (!bol->is_Bool()) {
875        assert(bol->Opcode() == Op_Conv2B, "predicate check only");
876        continue;
877      }
878      if (bol->as_Bool()->_test._test == BoolTest::ne)
879        continue; // not RC
880
881      Node *cmp = bol->in(1);
882      Node *rc_exp = cmp->in(1);
883      Node *limit = cmp->in(2);
884
885      Node *limit_c = phase->get_ctrl(limit);
886      if( limit_c == phase->C->top() )
887        return false;           // Found dead test on live IF?  No RCE!
888      if( is_member(phase->get_loop(limit_c) ) ) {
889        // Compare might have operands swapped; commute them
890        rc_exp = cmp->in(2);
891        limit  = cmp->in(1);
892        limit_c = phase->get_ctrl(limit);
893        if( is_member(phase->get_loop(limit_c) ) )
894          continue;             // Both inputs are loop varying; cannot RCE
895      }
896
897      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
898        continue;
899      }
900      // Yeah!  Found a test like 'trip+off vs limit'
901      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
902      // we need loop unswitching instead of iteration splitting.
903      if( is_loop_exit(iff) )
904        return true;            // Found reason to split iterations
905    } // End of is IF
906  }
907
908  return false;
909}
910
911//------------------------------policy_peel_only-------------------------------
912// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
913// for unrolling loops with NO array accesses.
914bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {
915  // check for vectorized loops, any peeling done was already applied
916  if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
917
918  for( uint i = 0; i < _body.size(); i++ )
919    if( _body[i]->is_Mem() )
920      return false;
921
922  // No memory accesses at all!
923  return true;
924}
925
926//------------------------------clone_up_backedge_goo--------------------------
927// If Node n lives in the back_ctrl block and cannot float, we clone a private
928// version of n in preheader_ctrl block and return that, otherwise return n.
929Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
930  if( get_ctrl(n) != back_ctrl ) return n;
931
932  // Only visit once
933  if (visited.test_set(n->_idx)) {
934    Node *x = clones.find(n->_idx);
935    if (x != NULL)
936      return x;
937    return n;
938  }
939
940  Node *x = NULL;               // If required, a clone of 'n'
941  // Check for 'n' being pinned in the backedge.
942  if( n->in(0) && n->in(0) == back_ctrl ) {
943    assert(clones.find(n->_idx) == NULL, "dead loop");
944    x = n->clone();             // Clone a copy of 'n' to preheader
945    clones.push(x, n->_idx);
946    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
947  }
948
949  // Recursive fixup any other input edges into x.
950  // If there are no changes we can just return 'n', otherwise
951  // we need to clone a private copy and change it.
952  for( uint i = 1; i < n->req(); i++ ) {
953    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
954    if( g != n->in(i) ) {
955      if( !x ) {
956        assert(clones.find(n->_idx) == NULL, "dead loop");
957        x = n->clone();
958        clones.push(x, n->_idx);
959      }
960      x->set_req(i, g);
961    }
962  }
963  if( x ) {                     // x can legally float to pre-header location
964    register_new_node( x, preheader_ctrl );
965    return x;
966  } else {                      // raise n to cover LCA of uses
967    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
968  }
969  return n;
970}
971
972bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
973  Node* castii = new CastIINode(incr, TypeInt::INT, true);
974  castii->set_req(0, ctrl);
975  register_new_node(castii, ctrl);
976  for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
977    Node* n = incr->fast_out(i);
978    if (n->is_Phi() && n->in(0) == loop) {
979      int nrep = n->replace_edge(incr, castii);
980      return true;
981    }
982  }
983  return false;
984}
985
986//------------------------------insert_pre_post_loops--------------------------
987// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
988// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
989// alignment.  Useful to unroll loops that do no array accesses.
990void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {
991
992#ifndef PRODUCT
993  if (TraceLoopOpts) {
994    if (peel_only)
995      tty->print("PeelMainPost ");
996    else
997      tty->print("PreMainPost  ");
998    loop->dump_head();
999  }
1000#endif
1001  C->set_major_progress();
1002
1003  // Find common pieces of the loop being guarded with pre & post loops
1004  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
1005  assert( main_head->is_normal_loop(), "" );
1006  CountedLoopEndNode *main_end = main_head->loopexit();
1007  guarantee(main_end != NULL, "no loop exit node");
1008  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
1009  uint dd_main_head = dom_depth(main_head);
1010  uint max = main_head->outcnt();
1011
1012  Node *pre_header= main_head->in(LoopNode::EntryControl);
1013  Node *init      = main_head->init_trip();
1014  Node *incr      = main_end ->incr();
1015  Node *limit     = main_end ->limit();
1016  Node *stride    = main_end ->stride();
1017  Node *cmp       = main_end ->cmp_node();
1018  BoolTest::mask b_test = main_end->test_trip();
1019
1020  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
1021  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
1022  if( bol->outcnt() != 1 ) {
1023    bol = bol->clone();
1024    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
1025    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
1026  }
1027  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
1028  if( cmp->outcnt() != 1 ) {
1029    cmp = cmp->clone();
1030    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
1031    _igvn.replace_input_of(bol, 1, cmp);
1032  }
1033
1034  // Add the post loop
1035  CountedLoopNode *post_head = NULL;
1036  Node *main_exit = insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
1037
1038  //------------------------------
1039  // Step B: Create Pre-Loop.
1040
1041  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
1042  // loop pre-header illegally has 2 control users (old & new loops).
1043  clone_loop( loop, old_new, dd_main_head );
1044  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
1045  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
1046  pre_head->set_pre_loop(main_head);
1047  Node *pre_incr = old_new[incr->_idx];
1048
1049  // Reduce the pre-loop trip count.
1050  pre_end->_prob = PROB_FAIR;
1051
1052  // Find the pre-loop normal exit.
1053  Node* pre_exit = pre_end->proj_out(false);
1054  assert( pre_exit->Opcode() == Op_IfFalse, "" );
1055  IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
1056  _igvn.register_new_node_with_optimizer( new_pre_exit );
1057  set_idom(new_pre_exit, pre_end, dd_main_head);
1058  set_loop(new_pre_exit, loop->_parent);
1059
1060  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
1061  // pre-loop, the main-loop may not execute at all.  Later in life this
1062  // zero-trip guard will become the minimum-trip guard when we unroll
1063  // the main-loop.
1064  Node *min_opaq = new Opaque1Node(C, limit);
1065  Node *min_cmp  = new CmpINode( pre_incr, min_opaq );
1066  Node *min_bol  = new BoolNode( min_cmp, b_test );
1067  register_new_node( min_opaq, new_pre_exit );
1068  register_new_node( min_cmp , new_pre_exit );
1069  register_new_node( min_bol , new_pre_exit );
1070
1071  // Build the IfNode (assume the main-loop is executed always).
1072  IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
1073  _igvn.register_new_node_with_optimizer( min_iff );
1074  set_idom(min_iff, new_pre_exit, dd_main_head);
1075  set_loop(min_iff, loop->_parent);
1076
1077  // Plug in the false-path, taken if we need to skip main-loop
1078  _igvn.hash_delete( pre_exit );
1079  pre_exit->set_req(0, min_iff);
1080  set_idom(pre_exit, min_iff, dd_main_head);
1081  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
1082  // Make the true-path, must enter the main loop
1083  Node *min_taken = new IfTrueNode( min_iff );
1084  _igvn.register_new_node_with_optimizer( min_taken );
1085  set_idom(min_taken, min_iff, dd_main_head);
1086  set_loop(min_taken, loop->_parent);
1087  // Plug in the true path
1088  _igvn.hash_delete( main_head );
1089  main_head->set_req(LoopNode::EntryControl, min_taken);
1090  set_idom(main_head, min_taken, dd_main_head);
1091
1092  Arena *a = Thread::current()->resource_area();
1093  VectorSet visited(a);
1094  Node_Stack clones(a, main_head->back_control()->outcnt());
1095  // Step B3: Make the fall-in values to the main-loop come from the
1096  // fall-out values of the pre-loop.
1097  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
1098    Node* main_phi = main_head->fast_out(i2);
1099    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
1100      Node *pre_phi = old_new[main_phi->_idx];
1101      Node *fallpre  = clone_up_backedge_goo(pre_head->back_control(),
1102                                             main_head->init_control(),
1103                                             pre_phi->in(LoopNode::LoopBackControl),
1104                                             visited, clones);
1105      _igvn.hash_delete(main_phi);
1106      main_phi->set_req( LoopNode::EntryControl, fallpre );
1107    }
1108  }
1109
1110  // Nodes inside the loop may be control dependent on a predicate
1111  // that was moved before the preloop. If the back branch of the main
1112  // or post loops becomes dead, those nodes won't be dependent on the
1113  // test that guards that loop nest anymore which could lead to an
1114  // incorrect array access because it executes independently of the
1115  // test that was guarding the loop nest. We add a special CastII on
1116  // the if branch that enters the loop, between the input induction
1117  // variable value and the induction variable Phi to preserve correct
1118  // dependencies.
1119
1120  // CastII for the main loop:
1121  bool inserted = cast_incr_before_loop( pre_incr, min_taken, main_head );
1122  assert(inserted, "no castII inserted");
1123
1124  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
1125  // RCE and alignment may change this later.
1126  Node *cmp_end = pre_end->cmp_node();
1127  assert( cmp_end->in(2) == limit, "" );
1128  Node *pre_limit = new AddINode( init, stride );
1129
1130  // Save the original loop limit in this Opaque1 node for
1131  // use by range check elimination.
1132  Node *pre_opaq  = new Opaque1Node(C, pre_limit, limit);
1133
1134  register_new_node( pre_limit, pre_head->in(0) );
1135  register_new_node( pre_opaq , pre_head->in(0) );
1136
1137  // Since no other users of pre-loop compare, I can hack limit directly
1138  assert( cmp_end->outcnt() == 1, "no other users" );
1139  _igvn.hash_delete(cmp_end);
1140  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);
1141
1142  // Special case for not-equal loop bounds:
1143  // Change pre loop test, main loop test, and the
1144  // main loop guard test to use lt or gt depending on stride
1145  // direction:
1146  // positive stride use <
1147  // negative stride use >
1148  //
1149  // not-equal test is kept for post loop to handle case
1150  // when init > limit when stride > 0 (and reverse).
1151
1152  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
1153
1154    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
1155    // Modify pre loop end condition
1156    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
1157    BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
1158    register_new_node( new_bol0, pre_head->in(0) );
1159    _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
1160    // Modify main loop guard condition
1161    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
1162    BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
1163    register_new_node( new_bol1, new_pre_exit );
1164    _igvn.hash_delete(min_iff);
1165    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
1166    // Modify main loop end condition
1167    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
1168    BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
1169    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
1170    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
1171  }
1172
1173  // Flag main loop
1174  main_head->set_main_loop();
1175  if( peel_only ) main_head->set_main_no_pre_loop();
1176
1177  // Subtract a trip count for the pre-loop.
1178  main_head->set_trip_count(main_head->trip_count() - 1);
1179
1180  // It's difficult to be precise about the trip-counts
1181  // for the pre/post loops.  They are usually very short,
1182  // so guess that 4 trips is a reasonable value.
1183  post_head->set_profile_trip_cnt(4.0);
1184  pre_head->set_profile_trip_cnt(4.0);
1185
1186  // Now force out all loop-invariant dominating tests.  The optimizer
1187  // finds some, but we _know_ they are all useless.
1188  peeled_dom_test_elim(loop,old_new);
1189  loop->record_for_igvn();
1190}
1191
1192//------------------------------insert_vector_post_loop------------------------
1193// Insert a copy of the atomic unrolled vectorized main loop as a post loop,
1194// unroll_policy has already informed us that more unrolling is about to happen to
1195// the main loop.  The resultant post loop will serve as a vectorized drain loop.
1196void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new) {
1197  if (!loop->_head->is_CountedLoop()) return;
1198
1199  CountedLoopNode *cl = loop->_head->as_CountedLoop();
1200
1201  // only process vectorized main loops
1202  if (!cl->is_vectorized_loop() || !cl->is_main_loop()) return;
1203
1204  int slp_max_unroll_factor = cl->slp_max_unroll();
1205  int cur_unroll = cl->unrolled_count();
1206
1207  if (slp_max_unroll_factor == 0) return;
1208
1209  // only process atomic unroll vector loops (not super unrolled after vectorization)
1210  if (cur_unroll != slp_max_unroll_factor) return;
1211
1212  // we only ever process this one time
1213  if (cl->has_atomic_post_loop()) return;
1214
1215#ifndef PRODUCT
1216  if (TraceLoopOpts) {
1217    tty->print("PostVector  ");
1218    loop->dump_head();
1219  }
1220#endif
1221  C->set_major_progress();
1222
1223  // Find common pieces of the loop being guarded with pre & post loops
1224  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
1225  CountedLoopEndNode *main_end = main_head->loopexit();
1226  guarantee(main_end != NULL, "no loop exit node");
1227  // diagnostic to show loop end is not properly formed
1228  assert(main_end->outcnt() == 2, "1 true, 1 false path only");
1229
1230  // mark this loop as processed
1231  main_head->mark_has_atomic_post_loop();
1232
1233  Node *incr = main_end->incr();
1234  Node *limit = main_end->limit();
1235
1236  // In this case we throw away the result as we are not using it to connect anything else.
1237  CountedLoopNode *post_head = NULL;
1238  insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
1239
1240  // It's difficult to be precise about the trip-counts
1241  // for post loops.  They are usually very short,
1242  // so guess that unit vector trips is a reasonable value.
1243  post_head->set_profile_trip_cnt(cur_unroll);
1244
1245  // Now force out all loop-invariant dominating tests.  The optimizer
1246  // finds some, but we _know_ they are all useless.
1247  peeled_dom_test_elim(loop, old_new);
1248  loop->record_for_igvn();
1249}
1250
1251
1252//-------------------------insert_scalar_rced_post_loop------------------------
1253// Insert a copy of the rce'd main loop as a post loop,
1254// We have not unrolled the main loop, so this is the right time to inject this.
1255// Later we will examine the partner of this post loop pair which still has range checks
1256// to see inject code which tests at runtime if the range checks are applicable.
1257void PhaseIdealLoop::insert_scalar_rced_post_loop(IdealLoopTree *loop, Node_List &old_new) {
1258  if (!loop->_head->is_CountedLoop()) return;
1259
1260  CountedLoopNode *cl = loop->_head->as_CountedLoop();
1261
1262  // only process RCE'd main loops
1263  if (!cl->is_main_loop() || cl->range_checks_present()) return;
1264
1265#ifndef PRODUCT
1266  if (TraceLoopOpts) {
1267    tty->print("PostScalarRce  ");
1268    loop->dump_head();
1269  }
1270#endif
1271  C->set_major_progress();
1272
1273  // Find common pieces of the loop being guarded with pre & post loops
1274  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
1275  CountedLoopEndNode *main_end = main_head->loopexit();
1276  guarantee(main_end != NULL, "no loop exit node");
1277  // diagnostic to show loop end is not properly formed
1278  assert(main_end->outcnt() == 2, "1 true, 1 false path only");
1279
1280  Node *incr = main_end->incr();
1281  Node *limit = main_end->limit();
1282
1283  // In this case we throw away the result as we are not using it to connect anything else.
1284  CountedLoopNode *post_head = NULL;
1285  insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
1286
1287  // It's difficult to be precise about the trip-counts
1288  // for post loops.  They are usually very short,
1289  // so guess that unit vector trips is a reasonable value.
1290  post_head->set_profile_trip_cnt(4.0);
1291  post_head->set_is_rce_post_loop();
1292
1293  // Now force out all loop-invariant dominating tests.  The optimizer
1294  // finds some, but we _know_ they are all useless.
1295  peeled_dom_test_elim(loop, old_new);
1296  loop->record_for_igvn();
1297}
1298
1299
1300//------------------------------insert_post_loop-------------------------------
1301// Insert post loops.  Add a post loop to the given loop passed.
1302Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
1303                                       CountedLoopNode *main_head, CountedLoopEndNode *main_end,
1304                                       Node *incr, Node *limit, CountedLoopNode *&post_head) {
1305
1306  //------------------------------
1307  // Step A: Create a new post-Loop.
1308  Node* main_exit = main_end->proj_out(false);
1309  assert(main_exit->Opcode() == Op_IfFalse, "");
1310  int dd_main_exit = dom_depth(main_exit);
1311
1312  // Step A1: Clone the loop body of main. The clone becomes the post-loop.
1313  // The main loop pre-header illegally has 2 control users (old & new loops).
1314  clone_loop(loop, old_new, dd_main_exit);
1315  assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, "");
1316  post_head = old_new[main_head->_idx]->as_CountedLoop();
1317  post_head->set_normal_loop();
1318  post_head->set_post_loop(main_head);
1319
1320  // Reduce the post-loop trip count.
1321  CountedLoopEndNode* post_end = old_new[main_end->_idx]->as_CountedLoopEnd();
1322  post_end->_prob = PROB_FAIR;
1323
1324  // Build the main-loop normal exit.
1325  IfFalseNode *new_main_exit = new IfFalseNode(main_end);
1326  _igvn.register_new_node_with_optimizer(new_main_exit);
1327  set_idom(new_main_exit, main_end, dd_main_exit);
1328  set_loop(new_main_exit, loop->_parent);
1329
1330  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
1331  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
1332  // (the previous loop trip-counter exit value) because we will be changing
1333  // the exit value (via additional unrolling) so we cannot constant-fold away the zero
1334  // trip guard until all unrolling is done.
1335  Node *zer_opaq = new Opaque1Node(C, incr);
1336  Node *zer_cmp = new CmpINode(zer_opaq, limit);
1337  Node *zer_bol = new BoolNode(zer_cmp, main_end->test_trip());
1338  register_new_node(zer_opaq, new_main_exit);
1339  register_new_node(zer_cmp, new_main_exit);
1340  register_new_node(zer_bol, new_main_exit);
1341
1342  // Build the IfNode
1343  IfNode *zer_iff = new IfNode(new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN);
1344  _igvn.register_new_node_with_optimizer(zer_iff);
1345  set_idom(zer_iff, new_main_exit, dd_main_exit);
1346  set_loop(zer_iff, loop->_parent);
1347
1348  // Plug in the false-path, taken if we need to skip this post-loop
1349  _igvn.replace_input_of(main_exit, 0, zer_iff);
1350  set_idom(main_exit, zer_iff, dd_main_exit);
1351  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
1352  // Make the true-path, must enter this post loop
1353  Node *zer_taken = new IfTrueNode(zer_iff);
1354  _igvn.register_new_node_with_optimizer(zer_taken);
1355  set_idom(zer_taken, zer_iff, dd_main_exit);
1356  set_loop(zer_taken, loop->_parent);
1357  // Plug in the true path
1358  _igvn.hash_delete(post_head);
1359  post_head->set_req(LoopNode::EntryControl, zer_taken);
1360  set_idom(post_head, zer_taken, dd_main_exit);
1361
1362  Arena *a = Thread::current()->resource_area();
1363  VectorSet visited(a);
1364  Node_Stack clones(a, main_head->back_control()->outcnt());
1365  // Step A3: Make the fall-in values to the post-loop come from the
1366  // fall-out values of the main-loop.
1367  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
1368    Node* main_phi = main_head->fast_out(i);
1369    if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0) {
1370      Node *cur_phi = old_new[main_phi->_idx];
1371      Node *fallnew = clone_up_backedge_goo(main_head->back_control(),
1372                                            post_head->init_control(),
1373                                            main_phi->in(LoopNode::LoopBackControl),
1374                                            visited, clones);
1375      _igvn.hash_delete(cur_phi);
1376      cur_phi->set_req(LoopNode::EntryControl, fallnew);
1377    }
1378  }
1379
1380  // CastII for the new post loop:
1381  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
1382  assert(inserted, "no castII inserted");
1383
1384  return new_main_exit;
1385}
1386
1387//------------------------------is_invariant-----------------------------
1388// Return true if n is invariant
1389bool IdealLoopTree::is_invariant(Node* n) const {
1390  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
1391  if (n_c->is_top()) return false;
1392  return !is_member(_phase->get_loop(n_c));
1393}
1394
1395
1396//------------------------------do_unroll--------------------------------------
1397// Unroll the loop body one step - make each trip do 2 iterations.
1398void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
1399  assert(LoopUnrollLimit, "");
1400  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
1401  CountedLoopEndNode *loop_end = loop_head->loopexit();
1402  assert(loop_end, "");
1403#ifndef PRODUCT
1404  if (PrintOpto && VerifyLoopOptimizations) {
1405    tty->print("Unrolling ");
1406    loop->dump_head();
1407  } else if (TraceLoopOpts) {
1408    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
1409      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
1410    } else {
1411      tty->print("Unroll %d     ", loop_head->unrolled_count()*2);
1412    }
1413    loop->dump_head();
1414  }
1415
1416  if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) {
1417    Arena* arena = Thread::current()->resource_area();
1418    Node_Stack stack(arena, C->live_nodes() >> 2);
1419    Node_List rpo_list;
1420    VectorSet visited(arena);
1421    visited.set(loop_head->_idx);
1422    rpo( loop_head, stack, visited, rpo_list );
1423    dump(loop, rpo_list.size(), rpo_list );
1424  }
1425#endif
1426
1427  // Remember loop node count before unrolling to detect
1428  // if rounds of unroll,optimize are making progress
1429  loop_head->set_node_count_before_unroll(loop->_body.size());
1430
1431  Node *ctrl  = loop_head->in(LoopNode::EntryControl);
1432  Node *limit = loop_head->limit();
1433  Node *init  = loop_head->init_trip();
1434  Node *stride = loop_head->stride();
1435
1436  Node *opaq = NULL;
1437  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
1438    // Search for zero-trip guard.
1439
1440    // Check the shape of the graph at the loop entry. If an inappropriate
1441    // graph shape is encountered, the compiler bails out loop unrolling;
1442    // compilation of the method will still succeed.
1443    if (!is_canonical_loop_entry(loop_head)) {
1444      return;
1445    }
1446    opaq = ctrl->in(0)->in(1)->in(1)->in(2);
1447    // Zero-trip test uses an 'opaque' node which is not shared.
1448    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
1449  }
1450
1451  C->set_major_progress();
1452
1453  Node* new_limit = NULL;
1454  int stride_con = stride->get_int();
1455  int stride_p = (stride_con > 0) ? stride_con : -stride_con;
1456  uint old_trip_count = loop_head->trip_count();
1457  // Verify that unroll policy result is still valid.
1458  assert(old_trip_count > 1 &&
1459      (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");
1460
1461  // Adjust loop limit to keep valid iterations number after unroll.
1462  // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
1463  // which may overflow.
1464  if (!adjust_min_trip) {
1465    assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
1466        "odd trip count for maximally unroll");
1467    // Don't need to adjust limit for maximally unroll since trip count is even.
1468  } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
1469    // Loop's limit is constant. Loop's init could be constant when pre-loop
1470    // become peeled iteration.
1471    jlong init_con = init->get_int();
1472    // We can keep old loop limit if iterations count stays the same:
1473    //   old_trip_count == new_trip_count * 2
1474    // Note: since old_trip_count >= 2 then new_trip_count >= 1
1475    // so we also don't need to adjust zero trip test.
1476    jlong limit_con  = limit->get_int();
1477    // (stride_con*2) not overflow since stride_con <= 8.
1478    int new_stride_con = stride_con * 2;
1479    int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
1480    jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
1481    // New trip count should satisfy next conditions.
1482    assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
1483    uint new_trip_count = (uint)trip_count;
1484    adjust_min_trip = (old_trip_count != new_trip_count*2);
1485  }
1486
1487  if (adjust_min_trip) {
1488    // Step 2: Adjust the trip limit if it is called for.
1489    // The adjustment amount is -stride. Need to make sure if the
1490    // adjustment underflows or overflows, then the main loop is skipped.
1491    Node* cmp = loop_end->cmp_node();
1492    assert(cmp->in(2) == limit, "sanity");
1493    assert(opaq != NULL && opaq->in(1) == limit, "sanity");
1494
1495    // Verify that policy_unroll result is still valid.
1496    const TypeInt* limit_type = _igvn.type(limit)->is_int();
1497    assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
1498        stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");
1499
1500    if (limit->is_Con()) {
1501      // The check in policy_unroll and the assert above guarantee
1502      // no underflow if limit is constant.
1503      new_limit = _igvn.intcon(limit->get_int() - stride_con);
1504      set_ctrl(new_limit, C->root());
1505    } else {
1506      // Limit is not constant.
1507      if (loop_head->unrolled_count() == 1) { // only for first unroll
1508        // Separate limit by Opaque node in case it is an incremented
1509        // variable from previous loop to avoid using pre-incremented
1510        // value which could increase register pressure.
1511        // Otherwise reorg_offsets() optimization will create a separate
1512        // Opaque node for each use of trip-counter and as result
1513        // zero trip guard limit will be different from loop limit.
1514        assert(has_ctrl(opaq), "should have it");
1515        Node* opaq_ctrl = get_ctrl(opaq);
1516        limit = new Opaque2Node( C, limit );
1517        register_new_node( limit, opaq_ctrl );
1518      }
1519      if (stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo) ||
1520          stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi)) {
1521        // No underflow.
1522        new_limit = new SubINode(limit, stride);
1523      } else {
1524        // (limit - stride) may underflow.
1525        // Clamp the adjustment value with MININT or MAXINT:
1526        //
1527        //   new_limit = limit-stride
1528        //   if (stride > 0)
1529        //     new_limit = (limit < new_limit) ? MININT : new_limit;
1530        //   else
1531        //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
1532        //
1533        BoolTest::mask bt = loop_end->test_trip();
1534        assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
1535        Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
1536        set_ctrl(adj_max, C->root());
1537        Node* old_limit = NULL;
1538        Node* adj_limit = NULL;
1539        Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
1540        if (loop_head->unrolled_count() > 1 &&
1541            limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
1542            limit->in(CMoveNode::IfTrue) == adj_max &&
1543            bol->as_Bool()->_test._test == bt &&
1544            bol->in(1)->Opcode() == Op_CmpI &&
1545            bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
1546          // Loop was unrolled before.
1547          // Optimize the limit to avoid nested CMove:
1548          // use original limit as old limit.
1549          old_limit = bol->in(1)->in(1);
1550          // Adjust previous adjusted limit.
1551          adj_limit = limit->in(CMoveNode::IfFalse);
1552          adj_limit = new SubINode(adj_limit, stride);
1553        } else {
1554          old_limit = limit;
1555          adj_limit = new SubINode(limit, stride);
1556        }
1557        assert(old_limit != NULL && adj_limit != NULL, "");
1558        register_new_node( adj_limit, ctrl ); // adjust amount
1559        Node* adj_cmp = new CmpINode(old_limit, adj_limit);
1560        register_new_node( adj_cmp, ctrl );
1561        Node* adj_bool = new BoolNode(adj_cmp, bt);
1562        register_new_node( adj_bool, ctrl );
1563        new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
1564      }
1565      register_new_node(new_limit, ctrl);
1566    }
1567    assert(new_limit != NULL, "");
1568    // Replace in loop test.
1569    assert(loop_end->in(1)->in(1) == cmp, "sanity");
1570    if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
1571      // Don't need to create new test since only one user.
1572      _igvn.hash_delete(cmp);
1573      cmp->set_req(2, new_limit);
1574    } else {
1575      // Create new test since it is shared.
1576      Node* ctrl2 = loop_end->in(0);
1577      Node* cmp2  = cmp->clone();
1578      cmp2->set_req(2, new_limit);
1579      register_new_node(cmp2, ctrl2);
1580      Node* bol2 = loop_end->in(1)->clone();
1581      bol2->set_req(1, cmp2);
1582      register_new_node(bol2, ctrl2);
1583      _igvn.replace_input_of(loop_end, 1, bol2);
1584    }
1585    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
1586    // Make it a 1-trip test (means at least 2 trips).
1587
1588    // Guard test uses an 'opaque' node which is not shared.  Hence I
1589    // can edit it's inputs directly.  Hammer in the new limit for the
1590    // minimum-trip guard.
1591    assert(opaq->outcnt() == 1, "");
1592    _igvn.replace_input_of(opaq, 1, new_limit);
1593  }
1594
1595  // Adjust max trip count. The trip count is intentionally rounded
1596  // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
1597  // the main, unrolled, part of the loop will never execute as it is protected
1598  // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
1599  // and later determined that part of the unrolled loop was dead.
1600  loop_head->set_trip_count(old_trip_count / 2);
1601
1602  // Double the count of original iterations in the unrolled loop body.
1603  loop_head->double_unrolled_count();
1604
1605  // ---------
1606  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
1607  // represents the odd iterations; since the loop trips an even number of
1608  // times its backedge is never taken.  Kill the backedge.
1609  uint dd = dom_depth(loop_head);
1610  clone_loop( loop, old_new, dd );
1611
1612  // Make backedges of the clone equal to backedges of the original.
1613  // Make the fall-in from the original come from the fall-out of the clone.
1614  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
1615    Node* phi = loop_head->fast_out(j);
1616    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
1617      Node *newphi = old_new[phi->_idx];
1618      _igvn.hash_delete( phi );
1619      _igvn.hash_delete( newphi );
1620
1621      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
1622      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
1623      phi   ->set_req(LoopNode::LoopBackControl, C->top());
1624    }
1625  }
1626  Node *clone_head = old_new[loop_head->_idx];
1627  _igvn.hash_delete( clone_head );
1628  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
1629  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
1630  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
1631  loop->_head = clone_head;     // New loop header
1632
1633  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
1634  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);
1635
1636  // Kill the clone's backedge
1637  Node *newcle = old_new[loop_end->_idx];
1638  _igvn.hash_delete( newcle );
1639  Node *one = _igvn.intcon(1);
1640  set_ctrl(one, C->root());
1641  newcle->set_req(1, one);
1642  // Force clone into same loop body
1643  uint max = loop->_body.size();
1644  for( uint k = 0; k < max; k++ ) {
1645    Node *old = loop->_body.at(k);
1646    Node *nnn = old_new[old->_idx];
1647    loop->_body.push(nnn);
1648    if (!has_ctrl(old))
1649      set_loop(nnn, loop);
1650  }
1651
1652  loop->record_for_igvn();
1653
1654#ifndef PRODUCT
1655  if (C->do_vector_loop() && (PrintOpto && VerifyLoopOptimizations || TraceLoopOpts)) {
1656    tty->print("\nnew loop after unroll\n");       loop->dump_head();
1657    for (uint i = 0; i < loop->_body.size(); i++) {
1658      loop->_body.at(i)->dump();
1659    }
1660    if(C->clone_map().is_debug()) {
1661      tty->print("\nCloneMap\n");
1662      Dict* dict = C->clone_map().dict();
1663      DictI i(dict);
1664      tty->print_cr("Dict@%p[%d] = ", dict, dict->Size());
1665      for (int ii = 0; i.test(); ++i, ++ii) {
1666        NodeCloneInfo cl((uint64_t)dict->operator[]((void*)i._key));
1667        tty->print("%d->%d:%d,", (int)(intptr_t)i._key, cl.idx(), cl.gen());
1668        if (ii % 10 == 9) {
1669          tty->print_cr(" ");
1670        }
1671      }
1672      tty->print_cr(" ");
1673    }
1674  }
1675#endif
1676
1677}
1678
1679//------------------------------do_maximally_unroll----------------------------
1680
1681void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
1682  CountedLoopNode *cl = loop->_head->as_CountedLoop();
1683  assert(cl->has_exact_trip_count(), "trip count is not exact");
1684  assert(cl->trip_count() > 0, "");
1685#ifndef PRODUCT
1686  if (TraceLoopOpts) {
1687    tty->print("MaxUnroll  %d ", cl->trip_count());
1688    loop->dump_head();
1689  }
1690#endif
1691
1692  // If loop is tripping an odd number of times, peel odd iteration
1693  if ((cl->trip_count() & 1) == 1) {
1694    do_peeling(loop, old_new);
1695  }
1696
1697  // Now its tripping an even number of times remaining.  Double loop body.
1698  // Do not adjust pre-guards; they are not needed and do not exist.
1699  if (cl->trip_count() > 0) {
1700    assert((cl->trip_count() & 1) == 0, "missed peeling");
1701    do_unroll(loop, old_new, false);
1702  }
1703}
1704
1705void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) {
1706  if (SuperWordReductions == false) return;
1707
1708  CountedLoopNode* loop_head = loop->_head->as_CountedLoop();
1709  if (loop_head->unrolled_count() > 1) {
1710    return;
1711  }
1712
1713  Node* trip_phi = loop_head->phi();
1714  for (DUIterator_Fast imax, i = loop_head->fast_outs(imax); i < imax; i++) {
1715    Node* phi = loop_head->fast_out(i);
1716    if (phi->is_Phi() && phi->outcnt() > 0 && phi != trip_phi) {
1717      // For definitions which are loop inclusive and not tripcounts.
1718      Node* def_node = phi->in(LoopNode::LoopBackControl);
1719
1720      if (def_node != NULL) {
1721        Node* n_ctrl = get_ctrl(def_node);
1722        if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) {
1723          // Now test it to see if it fits the standard pattern for a reduction operator.
1724          int opc = def_node->Opcode();
1725          if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) {
1726            if (!def_node->is_reduction()) { // Not marked yet
1727              // To be a reduction, the arithmetic node must have the phi as input and provide a def to it
1728              bool ok = false;
1729              for (unsigned j = 1; j < def_node->req(); j++) {
1730                Node* in = def_node->in(j);
1731                if (in == phi) {
1732                  ok = true;
1733                  break;
1734                }
1735              }
1736
1737              // do nothing if we did not match the initial criteria
1738              if (ok == false) {
1739                continue;
1740              }
1741
1742              // The result of the reduction must not be used in the loop
1743              for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) {
1744                Node* u = def_node->fast_out(i);
1745                if (!loop->is_member(get_loop(ctrl_or_self(u)))) {
1746                  continue;
1747                }
1748                if (u == phi) {
1749                  continue;
1750                }
1751                ok = false;
1752              }
1753
1754              // iff the uses conform
1755              if (ok) {
1756                def_node->add_flag(Node::Flag_is_reduction);
1757                loop_head->mark_has_reductions();
1758              }
1759            }
1760          }
1761        }
1762      }
1763    }
1764  }
1765}
1766
1767//------------------------------adjust_limit-----------------------------------
1768// Helper function for add_constraint().
1769Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
1770  // Compute "I :: (limit-offset)/scale"
1771  Node *con = new SubINode(rc_limit, offset);
1772  register_new_node(con, pre_ctrl);
1773  Node *X = new DivINode(0, con, scale);
1774  register_new_node(X, pre_ctrl);
1775
1776  // Adjust loop limit
1777  loop_limit = (stride_con > 0)
1778               ? (Node*)(new MinINode(loop_limit, X))
1779               : (Node*)(new MaxINode(loop_limit, X));
1780  register_new_node(loop_limit, pre_ctrl);
1781  return loop_limit;
1782}
1783
1784//------------------------------add_constraint---------------------------------
1785// Constrain the main loop iterations so the conditions:
1786//    low_limit <= scale_con * I + offset  <  upper_limit
1787// always holds true.  That is, either increase the number of iterations in
1788// the pre-loop or the post-loop until the condition holds true in the main
1789// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
1790// stride and scale are constants (offset and limit often are).
1791void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
1792  // For positive stride, the pre-loop limit always uses a MAX function
1793  // and the main loop a MIN function.  For negative stride these are
1794  // reversed.
1795
1796  // Also for positive stride*scale the affine function is increasing, so the
1797  // pre-loop must check for underflow and the post-loop for overflow.
1798  // Negative stride*scale reverses this; pre-loop checks for overflow and
1799  // post-loop for underflow.
1800
1801  Node *scale = _igvn.intcon(scale_con);
1802  set_ctrl(scale, C->root());
1803
1804  if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
1805    // The overflow limit: scale*I+offset < upper_limit
1806    // For main-loop compute
1807    //   ( if (scale > 0) /* and stride > 0 */
1808    //       I < (upper_limit-offset)/scale
1809    //     else /* scale < 0 and stride < 0 */
1810    //       I > (upper_limit-offset)/scale
1811    //   )
1812    //
1813    // (upper_limit-offset) may overflow or underflow.
1814    // But it is fine since main loop will either have
1815    // less iterations or will be skipped in such case.
1816    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
1817
1818    // The underflow limit: low_limit <= scale*I+offset.
1819    // For pre-loop compute
1820    //   NOT(scale*I+offset >= low_limit)
1821    //   scale*I+offset < low_limit
1822    //   ( if (scale > 0) /* and stride > 0 */
1823    //       I < (low_limit-offset)/scale
1824    //     else /* scale < 0 and stride < 0 */
1825    //       I > (low_limit-offset)/scale
1826    //   )
1827
1828    if (low_limit->get_int() == -max_jint) {
1829      // We need this guard when scale*pre_limit+offset >= limit
1830      // due to underflow. So we need execute pre-loop until
1831      // scale*I+offset >= min_int. But (min_int-offset) will
1832      // underflow when offset > 0 and X will be > original_limit
1833      // when stride > 0. To avoid it we replace positive offset with 0.
1834      //
1835      // Also (min_int+1 == -max_int) is used instead of min_int here
1836      // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1837      Node* shift = _igvn.intcon(31);
1838      set_ctrl(shift, C->root());
1839      Node* sign = new RShiftINode(offset, shift);
1840      register_new_node(sign, pre_ctrl);
1841      offset = new AndINode(offset, sign);
1842      register_new_node(offset, pre_ctrl);
1843    } else {
1844      assert(low_limit->get_int() == 0, "wrong low limit for range check");
1845      // The only problem we have here when offset == min_int
1846      // since (0-min_int) == min_int. It may be fine for stride > 0
1847      // but for stride < 0 X will be < original_limit. To avoid it
1848      // max(pre_limit, original_limit) is used in do_range_check().
1849    }
1850    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1851    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
1852
1853  } else { // stride_con*scale_con < 0
1854    // For negative stride*scale pre-loop checks for overflow and
1855    // post-loop for underflow.
1856    //
1857    // The overflow limit: scale*I+offset < upper_limit
1858    // For pre-loop compute
1859    //   NOT(scale*I+offset < upper_limit)
1860    //   scale*I+offset >= upper_limit
1861    //   scale*I+offset+1 > upper_limit
1862    //   ( if (scale < 0) /* and stride > 0 */
1863    //       I < (upper_limit-(offset+1))/scale
1864    //     else /* scale > 0 and stride < 0 */
1865    //       I > (upper_limit-(offset+1))/scale
1866    //   )
1867    //
1868    // (upper_limit-offset-1) may underflow or overflow.
1869    // To avoid it min(pre_limit, original_limit) is used
1870    // in do_range_check() for stride > 0 and max() for < 0.
1871    Node *one  = _igvn.intcon(1);
1872    set_ctrl(one, C->root());
1873
1874    Node *plus_one = new AddINode(offset, one);
1875    register_new_node( plus_one, pre_ctrl );
1876    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1877    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
1878
1879    if (low_limit->get_int() == -max_jint) {
1880      // We need this guard when scale*main_limit+offset >= limit
1881      // due to underflow. So we need execute main-loop while
1882      // scale*I+offset+1 > min_int. But (min_int-offset-1) will
1883      // underflow when (offset+1) > 0 and X will be < main_limit
1884      // when scale < 0 (and stride > 0). To avoid it we replace
1885      // positive (offset+1) with 0.
1886      //
1887      // Also (min_int+1 == -max_int) is used instead of min_int here
1888      // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1889      Node* shift = _igvn.intcon(31);
1890      set_ctrl(shift, C->root());
1891      Node* sign = new RShiftINode(plus_one, shift);
1892      register_new_node(sign, pre_ctrl);
1893      plus_one = new AndINode(plus_one, sign);
1894      register_new_node(plus_one, pre_ctrl);
1895    } else {
1896      assert(low_limit->get_int() == 0, "wrong low limit for range check");
1897      // The only problem we have here when offset == max_int
1898      // since (max_int+1) == min_int and (0-min_int) == min_int.
1899      // But it is fine since main loop will either have
1900      // less iterations or will be skipped in such case.
1901    }
1902    // The underflow limit: low_limit <= scale*I+offset.
1903    // For main-loop compute
1904    //   scale*I+offset+1 > low_limit
1905    //   ( if (scale < 0) /* and stride > 0 */
1906    //       I < (low_limit-(offset+1))/scale
1907    //     else /* scale > 0 and stride < 0 */
1908    //       I > (low_limit-(offset+1))/scale
1909    //   )
1910
1911    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
1912  }
1913}
1914
1915
1916//------------------------------is_scaled_iv---------------------------------
1917// Return true if exp is a constant times an induction var
1918bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
1919  if (exp == iv) {
1920    if (p_scale != NULL) {
1921      *p_scale = 1;
1922    }
1923    return true;
1924  }
1925  int opc = exp->Opcode();
1926  if (opc == Op_MulI) {
1927    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1928      if (p_scale != NULL) {
1929        *p_scale = exp->in(2)->get_int();
1930      }
1931      return true;
1932    }
1933    if (exp->in(2) == iv && exp->in(1)->is_Con()) {
1934      if (p_scale != NULL) {
1935        *p_scale = exp->in(1)->get_int();
1936      }
1937      return true;
1938    }
1939  } else if (opc == Op_LShiftI) {
1940    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1941      if (p_scale != NULL) {
1942        *p_scale = 1 << exp->in(2)->get_int();
1943      }
1944      return true;
1945    }
1946  }
1947  return false;
1948}
1949
1950//-----------------------------is_scaled_iv_plus_offset------------------------------
1951// Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
1952bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
1953  if (is_scaled_iv(exp, iv, p_scale)) {
1954    if (p_offset != NULL) {
1955      Node *zero = _igvn.intcon(0);
1956      set_ctrl(zero, C->root());
1957      *p_offset = zero;
1958    }
1959    return true;
1960  }
1961  int opc = exp->Opcode();
1962  if (opc == Op_AddI) {
1963    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1964      if (p_offset != NULL) {
1965        *p_offset = exp->in(2);
1966      }
1967      return true;
1968    }
1969    if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1970      if (p_offset != NULL) {
1971        *p_offset = exp->in(1);
1972      }
1973      return true;
1974    }
1975    if (exp->in(2)->is_Con()) {
1976      Node* offset2 = NULL;
1977      if (depth < 2 &&
1978          is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
1979                                   p_offset != NULL ? &offset2 : NULL, depth+1)) {
1980        if (p_offset != NULL) {
1981          Node *ctrl_off2 = get_ctrl(offset2);
1982          Node* offset = new AddINode(offset2, exp->in(2));
1983          register_new_node(offset, ctrl_off2);
1984          *p_offset = offset;
1985        }
1986        return true;
1987      }
1988    }
1989  } else if (opc == Op_SubI) {
1990    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1991      if (p_offset != NULL) {
1992        Node *zero = _igvn.intcon(0);
1993        set_ctrl(zero, C->root());
1994        Node *ctrl_off = get_ctrl(exp->in(2));
1995        Node* offset = new SubINode(zero, exp->in(2));
1996        register_new_node(offset, ctrl_off);
1997        *p_offset = offset;
1998      }
1999      return true;
2000    }
2001    if (is_scaled_iv(exp->in(2), iv, p_scale)) {
2002      if (p_offset != NULL) {
2003        *p_scale *= -1;
2004        *p_offset = exp->in(1);
2005      }
2006      return true;
2007    }
2008  }
2009  return false;
2010}
2011
2012//------------------------------do_range_check---------------------------------
2013// Eliminate range-checks and other trip-counter vs loop-invariant tests.
2014int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
2015#ifndef PRODUCT
2016  if (PrintOpto && VerifyLoopOptimizations) {
2017    tty->print("Range Check Elimination ");
2018    loop->dump_head();
2019  } else if (TraceLoopOpts) {
2020    tty->print("RangeCheck   ");
2021    loop->dump_head();
2022  }
2023#endif
2024  assert(RangeCheckElimination, "");
2025  CountedLoopNode *cl = loop->_head->as_CountedLoop();
2026  // If we fail before trying to eliminate range checks, set multiversion state
2027  int closed_range_checks = 1;
2028
2029  // protect against stride not being a constant
2030  if (!cl->stride_is_con())
2031    return closed_range_checks;
2032
2033  // Find the trip counter; we are iteration splitting based on it
2034  Node *trip_counter = cl->phi();
2035  // Find the main loop limit; we will trim it's iterations
2036  // to not ever trip end tests
2037  Node *main_limit = cl->limit();
2038
2039  // Check graph shape. Cannot optimize a loop if zero-trip
2040  // Opaque1 node is optimized away and then another round
2041  // of loop opts attempted.
2042  if (!is_canonical_loop_entry(cl)) {
2043    return closed_range_checks;
2044  }
2045
2046  // Need to find the main-loop zero-trip guard
2047  Node *ctrl  = cl->in(LoopNode::EntryControl);
2048  Node *iffm = ctrl->in(0);
2049  Node *opqzm = iffm->in(1)->in(1)->in(2);
2050  assert(opqzm->in(1) == main_limit, "do not understand situation");
2051
2052  // Find the pre-loop limit; we will expand its iterations to
2053  // not ever trip low tests.
2054  Node *p_f = iffm->in(0);
2055  // pre loop may have been optimized out
2056  if (p_f->Opcode() != Op_IfFalse) {
2057    return closed_range_checks;
2058  }
2059  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
2060  assert(pre_end->loopnode()->is_pre_loop(), "");
2061  Node *pre_opaq1 = pre_end->limit();
2062  // Occasionally it's possible for a pre-loop Opaque1 node to be
2063  // optimized away and then another round of loop opts attempted.
2064  // We can not optimize this particular loop in that case.
2065  if (pre_opaq1->Opcode() != Op_Opaque1)
2066    return closed_range_checks;
2067  Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
2068  Node *pre_limit = pre_opaq->in(1);
2069
2070  // Where do we put new limit calculations
2071  Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
2072
2073  // Ensure the original loop limit is available from the
2074  // pre-loop Opaque1 node.
2075  Node *orig_limit = pre_opaq->original_loop_limit();
2076  if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
2077    return closed_range_checks;
2078
2079  // Must know if its a count-up or count-down loop
2080
2081  int stride_con = cl->stride_con();
2082  Node *zero = _igvn.intcon(0);
2083  Node *one  = _igvn.intcon(1);
2084  // Use symmetrical int range [-max_jint,max_jint]
2085  Node *mini = _igvn.intcon(-max_jint);
2086  set_ctrl(zero, C->root());
2087  set_ctrl(one,  C->root());
2088  set_ctrl(mini, C->root());
2089
2090  // Range checks that do not dominate the loop backedge (ie.
2091  // conditionally executed) can lengthen the pre loop limit beyond
2092  // the original loop limit. To prevent this, the pre limit is
2093  // (for stride > 0) MINed with the original loop limit (MAXed
2094  // stride < 0) when some range_check (rc) is conditionally
2095  // executed.
2096  bool conditional_rc = false;
2097
2098  // Count number of range checks and reduce by load range limits, if zero,
2099  // the loop is in canonical form to multiversion.
2100  closed_range_checks = 0;
2101
2102  // Check loop body for tests of trip-counter plus loop-invariant vs loop-variant.
2103  for( uint i = 0; i < loop->_body.size(); i++ ) {
2104    Node *iff = loop->_body[i];
2105    if (iff->Opcode() == Op_If ||
2106        iff->Opcode() == Op_RangeCheck) { // Test?
2107      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
2108      // we need loop unswitching instead of iteration splitting.
2109      closed_range_checks++;
2110      Node *exit = loop->is_loop_exit(iff);
2111      if( !exit ) continue;
2112      int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
2113
2114      // Get boolean condition to test
2115      Node *i1 = iff->in(1);
2116      if( !i1->is_Bool() ) continue;
2117      BoolNode *bol = i1->as_Bool();
2118      BoolTest b_test = bol->_test;
2119      // Flip sense of test if exit condition is flipped
2120      if( flip )
2121        b_test = b_test.negate();
2122
2123      // Get compare
2124      Node *cmp = bol->in(1);
2125
2126      // Look for trip_counter + offset vs limit
2127      Node *rc_exp = cmp->in(1);
2128      Node *limit  = cmp->in(2);
2129      jint scale_con= 1;        // Assume trip counter not scaled
2130
2131      Node *limit_c = get_ctrl(limit);
2132      if( loop->is_member(get_loop(limit_c) ) ) {
2133        // Compare might have operands swapped; commute them
2134        b_test = b_test.commute();
2135        rc_exp = cmp->in(2);
2136        limit  = cmp->in(1);
2137        limit_c = get_ctrl(limit);
2138        if( loop->is_member(get_loop(limit_c) ) )
2139          continue;             // Both inputs are loop varying; cannot RCE
2140      }
2141      // Here we know 'limit' is loop invariant
2142
2143      // 'limit' maybe pinned below the zero trip test (probably from a
2144      // previous round of rce), in which case, it can't be used in the
2145      // zero trip test expression which must occur before the zero test's if.
2146      if( limit_c == ctrl ) {
2147        continue;  // Don't rce this check but continue looking for other candidates.
2148      }
2149
2150      // Check for scaled induction variable plus an offset
2151      Node *offset = NULL;
2152
2153      if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
2154        continue;
2155      }
2156
2157      Node *offset_c = get_ctrl(offset);
2158      if( loop->is_member( get_loop(offset_c) ) )
2159        continue;               // Offset is not really loop invariant
2160      // Here we know 'offset' is loop invariant.
2161
2162      // As above for the 'limit', the 'offset' maybe pinned below the
2163      // zero trip test.
2164      if( offset_c == ctrl ) {
2165        continue; // Don't rce this check but continue looking for other candidates.
2166      }
2167#ifdef ASSERT
2168      if (TraceRangeLimitCheck) {
2169        tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
2170        bol->dump(2);
2171      }
2172#endif
2173      // At this point we have the expression as:
2174      //   scale_con * trip_counter + offset :: limit
2175      // where scale_con, offset and limit are loop invariant.  Trip_counter
2176      // monotonically increases by stride_con, a constant.  Both (or either)
2177      // stride_con and scale_con can be negative which will flip about the
2178      // sense of the test.
2179
2180      // Adjust pre and main loop limits to guard the correct iteration set
2181      if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
2182        if( b_test._test == BoolTest::lt ) { // Range checks always use lt
2183          // The underflow and overflow limits: 0 <= scale*I+offset < limit
2184          add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
2185          // (0-offset)/scale could be outside of loop iterations range.
2186          conditional_rc = true;
2187        } else {
2188          if (PrintOpto) {
2189            tty->print_cr("missed RCE opportunity");
2190          }
2191          continue;             // In release mode, ignore it
2192        }
2193      } else {                  // Otherwise work on normal compares
2194        switch( b_test._test ) {
2195        case BoolTest::gt:
2196          // Fall into GE case
2197        case BoolTest::ge:
2198          // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
2199          scale_con = -scale_con;
2200          offset = new SubINode( zero, offset );
2201          register_new_node( offset, pre_ctrl );
2202          limit  = new SubINode( zero, limit );
2203          register_new_node( limit, pre_ctrl );
2204          // Fall into LE case
2205        case BoolTest::le:
2206          if (b_test._test != BoolTest::gt) {
2207            // Convert X <= Y to X < Y+1
2208            limit = new AddINode( limit, one );
2209            register_new_node( limit, pre_ctrl );
2210          }
2211          // Fall into LT case
2212        case BoolTest::lt:
2213          // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
2214          // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
2215          // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
2216          add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
2217          // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range.
2218          // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could
2219          // still be outside of loop range.
2220          conditional_rc = true;
2221          break;
2222        default:
2223          if (PrintOpto) {
2224            tty->print_cr("missed RCE opportunity");
2225          }
2226          continue;             // Unhandled case
2227        }
2228      }
2229
2230      // Kill the eliminated test
2231      C->set_major_progress();
2232      Node *kill_con = _igvn.intcon( 1-flip );
2233      set_ctrl(kill_con, C->root());
2234      _igvn.replace_input_of(iff, 1, kill_con);
2235      // Find surviving projection
2236      assert(iff->is_If(), "");
2237      ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
2238      // Find loads off the surviving projection; remove their control edge
2239      for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
2240        Node* cd = dp->fast_out(i); // Control-dependent node
2241        if (cd->is_Load() && cd->depends_only_on_test()) {   // Loads can now float around in the loop
2242          // Allow the load to float around in the loop, or before it
2243          // but NOT before the pre-loop.
2244          _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
2245          --i;
2246          --imax;
2247        }
2248      }
2249      if (limit->Opcode() == Op_LoadRange) {
2250        closed_range_checks--;
2251      }
2252
2253    } // End of is IF
2254
2255  }
2256
2257  // Update loop limits
2258  if (conditional_rc) {
2259    pre_limit = (stride_con > 0) ? (Node*)new MinINode(pre_limit, orig_limit)
2260                                 : (Node*)new MaxINode(pre_limit, orig_limit);
2261    register_new_node(pre_limit, pre_ctrl);
2262  }
2263  _igvn.replace_input_of(pre_opaq, 1, pre_limit);
2264
2265  // Note:: we are making the main loop limit no longer precise;
2266  // need to round up based on stride.
2267  cl->set_nonexact_trip_count();
2268  Node *main_cle = cl->loopexit();
2269  Node *main_bol = main_cle->in(1);
2270  // Hacking loop bounds; need private copies of exit test
2271  if( main_bol->outcnt() > 1 ) {// BoolNode shared?
2272    main_bol = main_bol->clone();// Clone a private BoolNode
2273    register_new_node( main_bol, main_cle->in(0) );
2274    _igvn.replace_input_of(main_cle, 1, main_bol);
2275  }
2276  Node *main_cmp = main_bol->in(1);
2277  if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
2278    main_cmp = main_cmp->clone();// Clone a private CmpNode
2279    register_new_node( main_cmp, main_cle->in(0) );
2280    _igvn.replace_input_of(main_bol, 1, main_cmp);
2281  }
2282  // Hack the now-private loop bounds
2283  _igvn.replace_input_of(main_cmp, 2, main_limit);
2284  // The OpaqueNode is unshared by design
2285  assert( opqzm->outcnt() == 1, "cannot hack shared node" );
2286  _igvn.replace_input_of(opqzm, 1, main_limit);
2287
2288  return closed_range_checks;
2289}
2290
2291//------------------------------has_range_checks-------------------------------
2292// Check to see if RCE cleaned the current loop of range-checks.
2293void PhaseIdealLoop::has_range_checks(IdealLoopTree *loop) {
2294  assert(RangeCheckElimination, "");
2295
2296  // skip if not a counted loop
2297  if (!loop->is_counted()) return;
2298
2299  CountedLoopNode *cl = loop->_head->as_CountedLoop();
2300
2301  // skip this loop if it is already checked
2302  if (cl->has_been_range_checked()) return;
2303
2304  // Now check for existence of range checks
2305  for (uint i = 0; i < loop->_body.size(); i++) {
2306    Node *iff = loop->_body[i];
2307    int iff_opc = iff->Opcode();
2308    if (iff_opc == Op_If || iff_opc == Op_RangeCheck) {
2309      cl->mark_has_range_checks();
2310      break;
2311    }
2312  }
2313  cl->set_has_been_range_checked();
2314}
2315
2316//-------------------------multi_version_post_loops----------------------------
2317// Check the range checks that remain, if simple, use the bounds to guard
2318// which version to a post loop we execute, one with range checks or one without
2319bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop) {
2320  bool multi_version_succeeded = false;
2321  assert(RangeCheckElimination, "");
2322  CountedLoopNode *legacy_cl = legacy_loop->_head->as_CountedLoop();
2323  assert(legacy_cl->is_post_loop(), "");
2324
2325  // Check for existence of range checks using the unique instance to make a guard with
2326  Unique_Node_List worklist;
2327  for (uint i = 0; i < legacy_loop->_body.size(); i++) {
2328    Node *iff = legacy_loop->_body[i];
2329    int iff_opc = iff->Opcode();
2330    if (iff_opc == Op_If || iff_opc == Op_RangeCheck) {
2331      worklist.push(iff);
2332    }
2333  }
2334
2335  // Find RCE'd post loop so that we can stage its guard.
2336  if (!is_canonical_loop_entry(legacy_cl)) return multi_version_succeeded;
2337  Node* ctrl = legacy_cl->in(LoopNode::EntryControl);
2338  Node* iffm = ctrl->in(0);
2339
2340  // Now we test that both the post loops are connected
2341  Node* post_loop_region = iffm->in(0);
2342  if (post_loop_region == NULL) return multi_version_succeeded;
2343  if (!post_loop_region->is_Region()) return multi_version_succeeded;
2344  Node* covering_region = post_loop_region->in(RegionNode::Control+1);
2345  if (covering_region == NULL) return multi_version_succeeded;
2346  if (!covering_region->is_Region()) return multi_version_succeeded;
2347  Node* p_f = covering_region->in(RegionNode::Control);
2348  if (p_f == NULL) return multi_version_succeeded;
2349  if (!p_f->is_IfFalse()) return multi_version_succeeded;
2350  if (!p_f->in(0)->is_CountedLoopEnd()) return multi_version_succeeded;
2351  CountedLoopEndNode* rce_loop_end = p_f->in(0)->as_CountedLoopEnd();
2352  if (rce_loop_end == NULL) return multi_version_succeeded;
2353  CountedLoopNode* rce_cl = rce_loop_end->loopnode();
2354  if (rce_cl == NULL || !rce_cl->is_post_loop()) return multi_version_succeeded;
2355  CountedLoopNode *known_rce_cl = rce_loop->_head->as_CountedLoop();
2356  if (rce_cl != known_rce_cl) return multi_version_succeeded;
2357
2358  // Then we fetch the cover entry test
2359  ctrl = rce_cl->in(LoopNode::EntryControl);
2360  if (!ctrl->is_IfTrue() && !ctrl->is_IfFalse()) return multi_version_succeeded;
2361
2362#ifndef PRODUCT
2363  if (TraceLoopOpts) {
2364    tty->print("PostMultiVersion\n");
2365    rce_loop->dump_head();
2366    legacy_loop->dump_head();
2367  }
2368#endif
2369
2370  // Now fetch the limit we want to compare against
2371  Node *limit = rce_cl->limit();
2372  bool first_time = true;
2373
2374  // If we got this far, we identified the post loop which has been RCE'd and
2375  // we have a work list.  Now we will try to transform the if guard to cause
2376  // the loop pair to be multi version executed with the determination left to runtime
2377  // or the optimizer if full information is known about the given arrays at compile time.
2378  Node *last_min = NULL;
2379  multi_version_succeeded = true;
2380  while (worklist.size()) {
2381    Node* rc_iffm = worklist.pop();
2382    if (rc_iffm->is_If()) {
2383      Node *rc_bolzm = rc_iffm->in(1);
2384      if (rc_bolzm->is_Bool()) {
2385        Node *rc_cmpzm = rc_bolzm->in(1);
2386        if (rc_cmpzm->is_Cmp()) {
2387          Node *rc_left = rc_cmpzm->in(2);
2388          if (rc_left->Opcode() != Op_LoadRange) {
2389            multi_version_succeeded = false;
2390            break;
2391          }
2392          if (first_time) {
2393            last_min = rc_left;
2394            first_time = false;
2395          } else {
2396            Node *cur_min = new MinINode(last_min, rc_left);
2397            last_min = cur_min;
2398            _igvn.register_new_node_with_optimizer(last_min);
2399          }
2400        }
2401      }
2402    }
2403  }
2404
2405  // All we have to do is update the limit of the rce loop
2406  // with the min of our expression and the current limit.
2407  // We will use this expression to replace the current limit.
2408  if (last_min && multi_version_succeeded) {
2409    Node *cur_min = new MinINode(last_min, limit);
2410    _igvn.register_new_node_with_optimizer(cur_min);
2411    Node *cmp_node = rce_loop_end->cmp_node();
2412    _igvn.replace_input_of(cmp_node, 2, cur_min);
2413    set_idom(cmp_node, cur_min, dom_depth(ctrl));
2414    set_ctrl(cur_min, ctrl);
2415    set_loop(cur_min, rce_loop->_parent);
2416
2417    legacy_cl->mark_is_multiversioned();
2418    rce_cl->mark_is_multiversioned();
2419    multi_version_succeeded = true;
2420
2421    C->set_major_progress();
2422  }
2423
2424  return multi_version_succeeded;
2425}
2426
2427//-------------------------poison_rce_post_loop--------------------------------
2428// Causes the rce'd post loop to be optimized away if multiversioning fails
2429void PhaseIdealLoop::poison_rce_post_loop(IdealLoopTree *rce_loop) {
2430  CountedLoopNode *rce_cl = rce_loop->_head->as_CountedLoop();
2431  Node* ctrl = rce_cl->in(LoopNode::EntryControl);
2432  if (ctrl->is_IfTrue() || ctrl->is_IfFalse()) {
2433    Node* iffm = ctrl->in(0);
2434    if (iffm->is_If()) {
2435      Node* cur_bool = iffm->in(1);
2436      if (cur_bool->is_Bool()) {
2437        Node* cur_cmp = cur_bool->in(1);
2438        if (cur_cmp->is_Cmp()) {
2439          BoolTest::mask new_test = BoolTest::gt;
2440          BoolNode *new_bool = new BoolNode(cur_cmp, new_test);
2441          _igvn.replace_node(cur_bool, new_bool);
2442          _igvn._worklist.push(new_bool);
2443          Node* left_op = cur_cmp->in(1);
2444          _igvn.replace_input_of(cur_cmp, 2, left_op);
2445          C->set_major_progress();
2446        }
2447      }
2448    }
2449  }
2450}
2451
2452//------------------------------DCE_loop_body----------------------------------
2453// Remove simplistic dead code from loop body
2454void IdealLoopTree::DCE_loop_body() {
2455  for( uint i = 0; i < _body.size(); i++ )
2456    if( _body.at(i)->outcnt() == 0 )
2457      _body.map( i--, _body.pop() );
2458}
2459
2460
2461//------------------------------adjust_loop_exit_prob--------------------------
2462// Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
2463// Replace with a 1-in-10 exit guess.
2464void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
2465  Node *test = tail();
2466  while( test != _head ) {
2467    uint top = test->Opcode();
2468    if( top == Op_IfTrue || top == Op_IfFalse ) {
2469      int test_con = ((ProjNode*)test)->_con;
2470      assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
2471      IfNode *iff = test->in(0)->as_If();
2472      if( iff->outcnt() == 2 ) {        // Ignore dead tests
2473        Node *bol = iff->in(1);
2474        if( bol && bol->req() > 1 && bol->in(1) &&
2475            ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
2476             (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
2477             (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
2478             (bol->in(1)->Opcode() == Op_CompareAndExchangeB ) ||
2479             (bol->in(1)->Opcode() == Op_CompareAndExchangeS ) ||
2480             (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
2481             (bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
2482             (bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
2483             (bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
2484             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB ) ||
2485             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS ) ||
2486             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
2487             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
2488             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
2489             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
2490             (bol->in(1)->Opcode() == Op_CompareAndSwapB ) ||
2491             (bol->in(1)->Opcode() == Op_CompareAndSwapS ) ||
2492             (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
2493             (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
2494             (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
2495             (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
2496          return;               // Allocation loops RARELY take backedge
2497        // Find the OTHER exit path from the IF
2498        Node* ex = iff->proj_out(1-test_con);
2499        float p = iff->_prob;
2500        if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
2501          if( top == Op_IfTrue ) {
2502            if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
2503              iff->_prob = PROB_STATIC_FREQUENT;
2504            }
2505          } else {
2506            if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
2507              iff->_prob = PROB_STATIC_INFREQUENT;
2508            }
2509          }
2510        }
2511      }
2512    }
2513    test = phase->idom(test);
2514  }
2515}
2516
2517#ifdef ASSERT
2518static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
2519  Node *ctrl  = cl->in(LoopNode::EntryControl);
2520  assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
2521  Node *iffm = ctrl->in(0);
2522  assert(iffm->Opcode() == Op_If, "");
2523  Node *p_f = iffm->in(0);
2524  assert(p_f->Opcode() == Op_IfFalse, "");
2525  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
2526  assert(pre_end->loopnode()->is_pre_loop(), "");
2527  return pre_end->loopnode();
2528}
2529#endif
2530
2531// Remove the main and post loops and make the pre loop execute all
2532// iterations. Useful when the pre loop is found empty.
2533void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) {
2534  CountedLoopEndNode* pre_end = cl->loopexit();
2535  Node* pre_cmp = pre_end->cmp_node();
2536  if (pre_cmp->in(2)->Opcode() != Op_Opaque1) {
2537    // Only safe to remove the main loop if the compiler optimized it
2538    // out based on an unknown number of iterations
2539    return;
2540  }
2541
2542  // Can we find the main loop?
2543  if (_next == NULL) {
2544    return;
2545  }
2546
2547  Node* next_head = _next->_head;
2548  if (!next_head->is_CountedLoop()) {
2549    return;
2550  }
2551
2552  CountedLoopNode* main_head = next_head->as_CountedLoop();
2553  if (!main_head->is_main_loop()) {
2554    return;
2555  }
2556
2557  assert(locate_pre_from_main(main_head) == cl, "bad main loop");
2558  Node* main_iff = main_head->in(LoopNode::EntryControl)->in(0);
2559
2560  // Remove the Opaque1Node of the pre loop and make it execute all iterations
2561  phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
2562  // Remove the Opaque1Node of the main loop so it can be optimized out
2563  Node* main_cmp = main_iff->in(1)->in(1);
2564  assert(main_cmp->in(2)->Opcode() == Op_Opaque1, "main loop has no opaque node?");
2565  phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
2566}
2567
2568//------------------------------policy_do_remove_empty_loop--------------------
2569// Micro-benchmark spamming.  Policy is to always remove empty loops.
2570// The 'DO' part is to replace the trip counter with the value it will
2571// have on the last iteration.  This will break the loop.
2572bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
2573  // Minimum size must be empty loop
2574  if (_body.size() > EMPTY_LOOP_SIZE)
2575    return false;
2576
2577  if (!_head->is_CountedLoop())
2578    return false;     // Dead loop
2579  CountedLoopNode *cl = _head->as_CountedLoop();
2580  if (!cl->is_valid_counted_loop())
2581    return false; // Malformed loop
2582  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
2583    return false;             // Infinite loop
2584
2585  if (cl->is_pre_loop()) {
2586    // If the loop we are removing is a pre-loop then the main and
2587    // post loop can be removed as well
2588    remove_main_post_loops(cl, phase);
2589  }
2590
2591#ifdef ASSERT
2592  // Ensure only one phi which is the iv.
2593  Node* iv = NULL;
2594  for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
2595    Node* n = cl->fast_out(i);
2596    if (n->Opcode() == Op_Phi) {
2597      assert(iv == NULL, "Too many phis" );
2598      iv = n;
2599    }
2600  }
2601  assert(iv == cl->phi(), "Wrong phi" );
2602#endif
2603
2604  // main and post loops have explicitly created zero trip guard
2605  bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
2606  if (needs_guard) {
2607    // Skip guard if values not overlap.
2608    const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int();
2609    const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int();
2610    int  stride_con = cl->stride_con();
2611    if (stride_con > 0) {
2612      needs_guard = (init_t->_hi >= limit_t->_lo);
2613    } else {
2614      needs_guard = (init_t->_lo <= limit_t->_hi);
2615    }
2616  }
2617  if (needs_guard) {
2618    // Check for an obvious zero trip guard.
2619    Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
2620    if (inctrl->Opcode() == Op_IfTrue || inctrl->Opcode() == Op_IfFalse) {
2621      bool maybe_swapped = (inctrl->Opcode() == Op_IfFalse);
2622      // The test should look like just the backedge of a CountedLoop
2623      Node* iff = inctrl->in(0);
2624      if (iff->is_If()) {
2625        Node* bol = iff->in(1);
2626        if (bol->is_Bool()) {
2627          BoolTest test = bol->as_Bool()->_test;
2628          if (maybe_swapped) {
2629            test._test = test.commute();
2630            test._test = test.negate();
2631          }
2632          if (test._test == cl->loopexit()->test_trip()) {
2633            Node* cmp = bol->in(1);
2634            int init_idx = maybe_swapped ? 2 : 1;
2635            int limit_idx = maybe_swapped ? 1 : 2;
2636            if (cmp->is_Cmp() && cmp->in(init_idx) == cl->init_trip() && cmp->in(limit_idx) == cl->limit()) {
2637              needs_guard = false;
2638            }
2639          }
2640        }
2641      }
2642    }
2643  }
2644
2645#ifndef PRODUCT
2646  if (PrintOpto) {
2647    tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : "");
2648    this->dump_head();
2649  } else if (TraceLoopOpts) {
2650    tty->print("Empty with%s zero trip guard   ", needs_guard ? "out" : "");
2651    this->dump_head();
2652  }
2653#endif
2654
2655  if (needs_guard) {
2656    // Peel the loop to ensure there's a zero trip guard
2657    Node_List old_new;
2658    phase->do_peeling(this, old_new);
2659  }
2660
2661  // Replace the phi at loop head with the final value of the last
2662  // iteration.  Then the CountedLoopEnd will collapse (backedge never
2663  // taken) and all loop-invariant uses of the exit values will be correct.
2664  Node *phi = cl->phi();
2665  Node *exact_limit = phase->exact_limit(this);
2666  if (exact_limit != cl->limit()) {
2667    // We also need to replace the original limit to collapse loop exit.
2668    Node* cmp = cl->loopexit()->cmp_node();
2669    assert(cl->limit() == cmp->in(2), "sanity");
2670    phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
2671    phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
2672  }
2673  // Note: the final value after increment should not overflow since
2674  // counted loop has limit check predicate.
2675  Node *final = new SubINode( exact_limit, cl->stride() );
2676  phase->register_new_node(final,cl->in(LoopNode::EntryControl));
2677  phase->_igvn.replace_node(phi,final);
2678  phase->C->set_major_progress();
2679  return true;
2680}
2681
2682//------------------------------policy_do_one_iteration_loop-------------------
2683// Convert one iteration loop into normal code.
2684bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
2685  if (!_head->as_Loop()->is_valid_counted_loop())
2686    return false; // Only for counted loop
2687
2688  CountedLoopNode *cl = _head->as_CountedLoop();
2689  if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
2690    return false;
2691  }
2692
2693#ifndef PRODUCT
2694  if(TraceLoopOpts) {
2695    tty->print("OneIteration ");
2696    this->dump_head();
2697  }
2698#endif
2699
2700  Node *init_n = cl->init_trip();
2701#ifdef ASSERT
2702  // Loop boundaries should be constant since trip count is exact.
2703  assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration");
2704#endif
2705  // Replace the phi at loop head with the value of the init_trip.
2706  // Then the CountedLoopEnd will collapse (backedge will not be taken)
2707  // and all loop-invariant uses of the exit values will be correct.
2708  phase->_igvn.replace_node(cl->phi(), cl->init_trip());
2709  phase->C->set_major_progress();
2710  return true;
2711}
2712
2713//=============================================================================
2714//------------------------------iteration_split_impl---------------------------
2715bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
2716  // Compute loop trip count if possible.
2717  compute_trip_count(phase);
2718
2719  // Convert one iteration loop into normal code.
2720  if (policy_do_one_iteration_loop(phase))
2721    return true;
2722
2723  // Check and remove empty loops (spam micro-benchmarks)
2724  if (policy_do_remove_empty_loop(phase))
2725    return true;  // Here we removed an empty loop
2726
2727  bool should_peel = policy_peeling(phase); // Should we peel?
2728
2729  bool should_unswitch = policy_unswitching(phase);
2730
2731  // Non-counted loops may be peeled; exactly 1 iteration is peeled.
2732  // This removes loop-invariant tests (usually null checks).
2733  if (!_head->is_CountedLoop()) { // Non-counted loop
2734    if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
2735      // Partial peel succeeded so terminate this round of loop opts
2736      return false;
2737    }
2738    if (should_peel) {            // Should we peel?
2739      if (PrintOpto) { tty->print_cr("should_peel"); }
2740      phase->do_peeling(this,old_new);
2741    } else if (should_unswitch) {
2742      phase->do_unswitching(this, old_new);
2743    }
2744    return true;
2745  }
2746  CountedLoopNode *cl = _head->as_CountedLoop();
2747
2748  if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops
2749
2750  // Do nothing special to pre- and post- loops
2751  if (cl->is_pre_loop() || cl->is_post_loop()) return true;
2752
2753  // Compute loop trip count from profile data
2754  compute_profile_trip_cnt(phase);
2755
2756  // Before attempting fancy unrolling, RCE or alignment, see if we want
2757  // to completely unroll this loop or do loop unswitching.
2758  if (cl->is_normal_loop()) {
2759    if (should_unswitch) {
2760      phase->do_unswitching(this, old_new);
2761      return true;
2762    }
2763    bool should_maximally_unroll =  policy_maximally_unroll(phase);
2764    if (should_maximally_unroll) {
2765      // Here we did some unrolling and peeling.  Eventually we will
2766      // completely unroll this loop and it will no longer be a loop.
2767      phase->do_maximally_unroll(this,old_new);
2768      return true;
2769    }
2770  }
2771
2772  // Skip next optimizations if running low on nodes. Note that
2773  // policy_unswitching and policy_maximally_unroll have this check.
2774  int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
2775  if ((int)(2 * _body.size()) > nodes_left) {
2776    return true;
2777  }
2778
2779  // Counted loops may be peeled, may need some iterations run up
2780  // front for RCE, and may want to align loop refs to a cache
2781  // line.  Thus we clone a full loop up front whose trip count is
2782  // at least 1 (if peeling), but may be several more.
2783
2784  // The main loop will start cache-line aligned with at least 1
2785  // iteration of the unrolled body (zero-trip test required) and
2786  // will have some range checks removed.
2787
2788  // A post-loop will finish any odd iterations (leftover after
2789  // unrolling), plus any needed for RCE purposes.
2790
2791  bool should_unroll = policy_unroll(phase);
2792
2793  bool should_rce = policy_range_check(phase);
2794
2795  bool should_align = policy_align(phase);
2796
2797  // If not RCE'ing (iteration splitting) or Aligning, then we do not
2798  // need a pre-loop.  We may still need to peel an initial iteration but
2799  // we will not be needing an unknown number of pre-iterations.
2800  //
2801  // Basically, if may_rce_align reports FALSE first time through,
2802  // we will not be able to later do RCE or Aligning on this loop.
2803  bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
2804
2805  // If we have any of these conditions (RCE, alignment, unrolling) met, then
2806  // we switch to the pre-/main-/post-loop model.  This model also covers
2807  // peeling.
2808  if (should_rce || should_align || should_unroll) {
2809    if (cl->is_normal_loop())  // Convert to 'pre/main/post' loops
2810      phase->insert_pre_post_loops(this,old_new, !may_rce_align);
2811
2812    // Adjust the pre- and main-loop limits to let the pre and post loops run
2813    // with full checks, but the main-loop with no checks.  Remove said
2814    // checks from the main body.
2815    if (should_rce) {
2816      if (phase->do_range_check(this, old_new) != 0) {
2817        cl->mark_has_range_checks();
2818      }
2819    } else if (PostLoopMultiversioning) {
2820      phase->has_range_checks(this);
2821    }
2822
2823    if (should_unroll && !should_peel && PostLoopMultiversioning) {
2824      // Try to setup multiversioning on main loops before they are unrolled
2825      if (cl->is_main_loop() && (cl->unrolled_count() == 1)) {
2826        phase->insert_scalar_rced_post_loop(this, old_new);
2827      }
2828    }
2829
2830    // Double loop body for unrolling.  Adjust the minimum-trip test (will do
2831    // twice as many iterations as before) and the main body limit (only do
2832    // an even number of trips).  If we are peeling, we might enable some RCE
2833    // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
2834    // peeling.
2835    if (should_unroll && !should_peel) {
2836      if (SuperWordLoopUnrollAnalysis) {
2837        phase->insert_vector_post_loop(this, old_new);
2838      }
2839      phase->do_unroll(this, old_new, true);
2840    }
2841
2842    // Adjust the pre-loop limits to align the main body
2843    // iterations.
2844    if (should_align)
2845      Unimplemented();
2846
2847  } else {                      // Else we have an unchanged counted loop
2848    if (should_peel)           // Might want to peel but do nothing else
2849      phase->do_peeling(this,old_new);
2850  }
2851  return true;
2852}
2853
2854
2855//=============================================================================
2856//------------------------------iteration_split--------------------------------
2857bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
2858  // Recursively iteration split nested loops
2859  if (_child && !_child->iteration_split(phase, old_new))
2860    return false;
2861
2862  // Clean out prior deadwood
2863  DCE_loop_body();
2864
2865
2866  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
2867  // Replace with a 1-in-10 exit guess.
2868  if (_parent /*not the root loop*/ &&
2869      !_irreducible &&
2870      // Also ignore the occasional dead backedge
2871      !tail()->is_top()) {
2872    adjust_loop_exit_prob(phase);
2873  }
2874
2875  // Gate unrolling, RCE and peeling efforts.
2876  if (!_child &&                // If not an inner loop, do not split
2877      !_irreducible &&
2878      _allow_optimizations &&
2879      !tail()->is_top()) {     // Also ignore the occasional dead backedge
2880    if (!_has_call) {
2881        if (!iteration_split_impl(phase, old_new)) {
2882          return false;
2883        }
2884    } else if (policy_unswitching(phase)) {
2885      phase->do_unswitching(this, old_new);
2886    }
2887  }
2888
2889  // Minor offset re-organization to remove loop-fallout uses of
2890  // trip counter when there was no major reshaping.
2891  phase->reorg_offsets(this);
2892
2893  if (_next && !_next->iteration_split(phase, old_new))
2894    return false;
2895  return true;
2896}
2897
2898
2899//=============================================================================
2900// Process all the loops in the loop tree and replace any fill
2901// patterns with an intrinsic version.
2902bool PhaseIdealLoop::do_intrinsify_fill() {
2903  bool changed = false;
2904  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2905    IdealLoopTree* lpt = iter.current();
2906    changed |= intrinsify_fill(lpt);
2907  }
2908  return changed;
2909}
2910
2911
2912// Examine an inner loop looking for a a single store of an invariant
2913// value in a unit stride loop,
2914bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2915                                     Node*& shift, Node*& con) {
2916  const char* msg = NULL;
2917  Node* msg_node = NULL;
2918
2919  store_value = NULL;
2920  con = NULL;
2921  shift = NULL;
2922
2923  // Process the loop looking for stores.  If there are multiple
2924  // stores or extra control flow give at this point.
2925  CountedLoopNode* head = lpt->_head->as_CountedLoop();
2926  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2927    Node* n = lpt->_body.at(i);
2928    if (n->outcnt() == 0) continue; // Ignore dead
2929    if (n->is_Store()) {
2930      if (store != NULL) {
2931        msg = "multiple stores";
2932        break;
2933      }
2934      int opc = n->Opcode();
2935      if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
2936        msg = "oop fills not handled";
2937        break;
2938      }
2939      Node* value = n->in(MemNode::ValueIn);
2940      if (!lpt->is_invariant(value)) {
2941        msg  = "variant store value";
2942      } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2943        msg = "not array address";
2944      }
2945      store = n;
2946      store_value = value;
2947    } else if (n->is_If() && n != head->loopexit()) {
2948      msg = "extra control flow";
2949      msg_node = n;
2950    }
2951  }
2952
2953  if (store == NULL) {
2954    // No store in loop
2955    return false;
2956  }
2957
2958  if (msg == NULL && head->stride_con() != 1) {
2959    // could handle negative strides too
2960    if (head->stride_con() < 0) {
2961      msg = "negative stride";
2962    } else {
2963      msg = "non-unit stride";
2964    }
2965  }
2966
2967  if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2968    msg = "can't handle store address";
2969    msg_node = store->in(MemNode::Address);
2970  }
2971
2972  if (msg == NULL &&
2973      (!store->in(MemNode::Memory)->is_Phi() ||
2974       store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2975    msg = "store memory isn't proper phi";
2976    msg_node = store->in(MemNode::Memory);
2977  }
2978
2979  // Make sure there is an appropriate fill routine
2980  BasicType t = store->as_Mem()->memory_type();
2981  const char* fill_name;
2982  if (msg == NULL &&
2983      StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2984    msg = "unsupported store";
2985    msg_node = store;
2986  }
2987
2988  if (msg != NULL) {
2989#ifndef PRODUCT
2990    if (TraceOptimizeFill) {
2991      tty->print_cr("not fill intrinsic candidate: %s", msg);
2992      if (msg_node != NULL) msg_node->dump();
2993    }
2994#endif
2995    return false;
2996  }
2997
2998  // Make sure the address expression can be handled.  It should be
2999  // head->phi * elsize + con.  head->phi might have a ConvI2L(CastII()).
3000  Node* elements[4];
3001  Node* cast = NULL;
3002  Node* conv = NULL;
3003  bool found_index = false;
3004  int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
3005  for (int e = 0; e < count; e++) {
3006    Node* n = elements[e];
3007    if (n->is_Con() && con == NULL) {
3008      con = n;
3009    } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
3010      Node* value = n->in(1);
3011#ifdef _LP64
3012      if (value->Opcode() == Op_ConvI2L) {
3013        conv = value;
3014        value = value->in(1);
3015      }
3016      if (value->Opcode() == Op_CastII &&
3017          value->as_CastII()->has_range_check()) {
3018        // Skip range check dependent CastII nodes
3019        cast = value;
3020        value = value->in(1);
3021      }
3022#endif
3023      if (value != head->phi()) {
3024        msg = "unhandled shift in address";
3025      } else {
3026        if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) {
3027          msg = "scale doesn't match";
3028        } else {
3029          found_index = true;
3030          shift = n;
3031        }
3032      }
3033    } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
3034      conv = n;
3035      n = n->in(1);
3036      if (n->Opcode() == Op_CastII &&
3037          n->as_CastII()->has_range_check()) {
3038        // Skip range check dependent CastII nodes
3039        cast = n;
3040        n = n->in(1);
3041      }
3042      if (n == head->phi()) {
3043        found_index = true;
3044      } else {
3045        msg = "unhandled input to ConvI2L";
3046      }
3047    } else if (n == head->phi()) {
3048      // no shift, check below for allowed cases
3049      found_index = true;
3050    } else {
3051      msg = "unhandled node in address";
3052      msg_node = n;
3053    }
3054  }
3055
3056  if (count == -1) {
3057    msg = "malformed address expression";
3058    msg_node = store;
3059  }
3060
3061  if (!found_index) {
3062    msg = "missing use of index";
3063  }
3064
3065  // byte sized items won't have a shift
3066  if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
3067    msg = "can't find shift";
3068    msg_node = store;
3069  }
3070
3071  if (msg != NULL) {
3072#ifndef PRODUCT
3073    if (TraceOptimizeFill) {
3074      tty->print_cr("not fill intrinsic: %s", msg);
3075      if (msg_node != NULL) msg_node->dump();
3076    }
3077#endif
3078    return false;
3079  }
3080
3081  // No make sure all the other nodes in the loop can be handled
3082  VectorSet ok(Thread::current()->resource_area());
3083
3084  // store related values are ok
3085  ok.set(store->_idx);
3086  ok.set(store->in(MemNode::Memory)->_idx);
3087
3088  CountedLoopEndNode* loop_exit = head->loopexit();
3089  guarantee(loop_exit != NULL, "no loop exit node");
3090
3091  // Loop structure is ok
3092  ok.set(head->_idx);
3093  ok.set(loop_exit->_idx);
3094  ok.set(head->phi()->_idx);
3095  ok.set(head->incr()->_idx);
3096  ok.set(loop_exit->cmp_node()->_idx);
3097  ok.set(loop_exit->in(1)->_idx);
3098
3099  // Address elements are ok
3100  if (con)   ok.set(con->_idx);
3101  if (shift) ok.set(shift->_idx);
3102  if (cast)  ok.set(cast->_idx);
3103  if (conv)  ok.set(conv->_idx);
3104
3105  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
3106    Node* n = lpt->_body.at(i);
3107    if (n->outcnt() == 0) continue; // Ignore dead
3108    if (ok.test(n->_idx)) continue;
3109    // Backedge projection is ok
3110    if (n->is_IfTrue() && n->in(0) == loop_exit) continue;
3111    if (!n->is_AddP()) {
3112      msg = "unhandled node";
3113      msg_node = n;
3114      break;
3115    }
3116  }
3117
3118  // Make sure no unexpected values are used outside the loop
3119  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
3120    Node* n = lpt->_body.at(i);
3121    // These values can be replaced with other nodes if they are used
3122    // outside the loop.
3123    if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue;
3124    for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
3125      Node* use = iter.get();
3126      if (!lpt->_body.contains(use)) {
3127        msg = "node is used outside loop";
3128        // lpt->_body.dump();
3129        msg_node = n;
3130        break;
3131      }
3132    }
3133  }
3134
3135#ifdef ASSERT
3136  if (TraceOptimizeFill) {
3137    if (msg != NULL) {
3138      tty->print_cr("no fill intrinsic: %s", msg);
3139      if (msg_node != NULL) msg_node->dump();
3140    } else {
3141      tty->print_cr("fill intrinsic for:");
3142    }
3143    store->dump();
3144    if (Verbose) {
3145      lpt->_body.dump();
3146    }
3147  }
3148#endif
3149
3150  return msg == NULL;
3151}
3152
3153
3154
3155bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
3156  // Only for counted inner loops
3157  if (!lpt->is_counted() || !lpt->is_inner()) {
3158    return false;
3159  }
3160
3161  // Must have constant stride
3162  CountedLoopNode* head = lpt->_head->as_CountedLoop();
3163  if (!head->is_valid_counted_loop() || !head->is_normal_loop()) {
3164    return false;
3165  }
3166
3167  // Check that the body only contains a store of a loop invariant
3168  // value that is indexed by the loop phi.
3169  Node* store = NULL;
3170  Node* store_value = NULL;
3171  Node* shift = NULL;
3172  Node* offset = NULL;
3173  if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
3174    return false;
3175  }
3176
3177  Node* exit = head->loopexit()->proj_out(0);
3178  if (exit == NULL) {
3179    return false;
3180  }
3181
3182#ifndef PRODUCT
3183  if (TraceLoopOpts) {
3184    tty->print("ArrayFill    ");
3185    lpt->dump_head();
3186  }
3187#endif
3188
3189  // Now replace the whole loop body by a call to a fill routine that
3190  // covers the same region as the loop.
3191  Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
3192
3193  // Build an expression for the beginning of the copy region
3194  Node* index = head->init_trip();
3195#ifdef _LP64
3196  index = new ConvI2LNode(index);
3197  _igvn.register_new_node_with_optimizer(index);
3198#endif
3199  if (shift != NULL) {
3200    // byte arrays don't require a shift but others do.
3201    index = new LShiftXNode(index, shift->in(2));
3202    _igvn.register_new_node_with_optimizer(index);
3203  }
3204  index = new AddPNode(base, base, index);
3205  _igvn.register_new_node_with_optimizer(index);
3206  Node* from = new AddPNode(base, index, offset);
3207  _igvn.register_new_node_with_optimizer(from);
3208  // Compute the number of elements to copy
3209  Node* len = new SubINode(head->limit(), head->init_trip());
3210  _igvn.register_new_node_with_optimizer(len);
3211
3212  BasicType t = store->as_Mem()->memory_type();
3213  bool aligned = false;
3214  if (offset != NULL && head->init_trip()->is_Con()) {
3215    int element_size = type2aelembytes(t);
3216    aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
3217  }
3218
3219  // Build a call to the fill routine
3220  const char* fill_name;
3221  address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
3222  assert(fill != NULL, "what?");
3223
3224  // Convert float/double to int/long for fill routines
3225  if (t == T_FLOAT) {
3226    store_value = new MoveF2INode(store_value);
3227    _igvn.register_new_node_with_optimizer(store_value);
3228  } else if (t == T_DOUBLE) {
3229    store_value = new MoveD2LNode(store_value);
3230    _igvn.register_new_node_with_optimizer(store_value);
3231  }
3232
3233  Node* mem_phi = store->in(MemNode::Memory);
3234  Node* result_ctrl;
3235  Node* result_mem;
3236  const TypeFunc* call_type = OptoRuntime::array_fill_Type();
3237  CallLeafNode *call = new CallLeafNoFPNode(call_type, fill,
3238                                            fill_name, TypeAryPtr::get_array_body_type(t));
3239  uint cnt = 0;
3240  call->init_req(TypeFunc::Parms + cnt++, from);
3241  call->init_req(TypeFunc::Parms + cnt++, store_value);
3242#ifdef _LP64
3243  len = new ConvI2LNode(len);
3244  _igvn.register_new_node_with_optimizer(len);
3245#endif
3246  call->init_req(TypeFunc::Parms + cnt++, len);
3247#ifdef _LP64
3248  call->init_req(TypeFunc::Parms + cnt++, C->top());
3249#endif
3250  call->init_req(TypeFunc::Control,   head->init_control());
3251  call->init_req(TypeFunc::I_O,       C->top());       // Does no I/O.
3252  call->init_req(TypeFunc::Memory,    mem_phi->in(LoopNode::EntryControl));
3253  call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
3254  call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
3255  _igvn.register_new_node_with_optimizer(call);
3256  result_ctrl = new ProjNode(call,TypeFunc::Control);
3257  _igvn.register_new_node_with_optimizer(result_ctrl);
3258  result_mem = new ProjNode(call,TypeFunc::Memory);
3259  _igvn.register_new_node_with_optimizer(result_mem);
3260
3261/* Disable following optimization until proper fix (add missing checks).
3262
3263  // If this fill is tightly coupled to an allocation and overwrites
3264  // the whole body, allow it to take over the zeroing.
3265  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
3266  if (alloc != NULL && alloc->is_AllocateArray()) {
3267    Node* length = alloc->as_AllocateArray()->Ideal_length();
3268    if (head->limit() == length &&
3269        head->init_trip() == _igvn.intcon(0)) {
3270      if (TraceOptimizeFill) {
3271        tty->print_cr("Eliminated zeroing in allocation");
3272      }
3273      alloc->maybe_set_complete(&_igvn);
3274    } else {
3275#ifdef ASSERT
3276      if (TraceOptimizeFill) {
3277        tty->print_cr("filling array but bounds don't match");
3278        alloc->dump();
3279        head->init_trip()->dump();
3280        head->limit()->dump();
3281        length->dump();
3282      }
3283#endif
3284    }
3285  }
3286*/
3287
3288  // Redirect the old control and memory edges that are outside the loop.
3289  // Sometimes the memory phi of the head is used as the outgoing
3290  // state of the loop.  It's safe in this case to replace it with the
3291  // result_mem.
3292  _igvn.replace_node(store->in(MemNode::Memory), result_mem);
3293  lazy_replace(exit, result_ctrl);
3294  _igvn.replace_node(store, result_mem);
3295  // Any uses the increment outside of the loop become the loop limit.
3296  _igvn.replace_node(head->incr(), head->limit());
3297
3298  // Disconnect the head from the loop.
3299  for (uint i = 0; i < lpt->_body.size(); i++) {
3300    Node* n = lpt->_body.at(i);
3301    _igvn.replace_node(n, C->top());
3302  }
3303
3304  return true;
3305}
3306