1/*
2 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "compiler/compileLog.hpp"
27#include "memory/allocation.inline.hpp"
28#include "opto/addnode.hpp"
29#include "opto/callnode.hpp"
30#include "opto/castnode.hpp"
31#include "opto/connode.hpp"
32#include "opto/convertnode.hpp"
33#include "opto/divnode.hpp"
34#include "opto/loopnode.hpp"
35#include "opto/mulnode.hpp"
36#include "opto/movenode.hpp"
37#include "opto/opaquenode.hpp"
38#include "opto/rootnode.hpp"
39#include "opto/runtime.hpp"
40#include "opto/subnode.hpp"
41#include "opto/superword.hpp"
42#include "opto/vectornode.hpp"
43
44//------------------------------is_loop_exit-----------------------------------
45// Given an IfNode, return the loop-exiting projection or NULL if both
46// arms remain in the loop.
47Node *IdealLoopTree::is_loop_exit(Node *iff) const {
48  if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
49  PhaseIdealLoop *phase = _phase;
50  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
51  // we need loop unswitching instead of peeling.
52  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
53    return iff->raw_out(0);
54  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
55    return iff->raw_out(1);
56  return NULL;
57}
58
59
60//=============================================================================
61
62
63//------------------------------record_for_igvn----------------------------
64// Put loop body on igvn work list
65void IdealLoopTree::record_for_igvn() {
66  for( uint i = 0; i < _body.size(); i++ ) {
67    Node *n = _body.at(i);
68    _phase->_igvn._worklist.push(n);
69  }
70}
71
72//------------------------------compute_exact_trip_count-----------------------
73// Compute loop trip count if possible. Do not recalculate trip count for
74// split loops (pre-main-post) which have their limits and inits behind Opaque node.
75void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase) {
76  if (!_head->as_Loop()->is_valid_counted_loop()) {
77    return;
78  }
79  CountedLoopNode* cl = _head->as_CountedLoop();
80  // Trip count may become nonexact for iteration split loops since
81  // RCE modifies limits. Note, _trip_count value is not reset since
82  // it is used to limit unrolling of main loop.
83  cl->set_nonexact_trip_count();
84
85  // Loop's test should be part of loop.
86  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
87    return; // Infinite loop
88
89#ifdef ASSERT
90  BoolTest::mask bt = cl->loopexit()->test_trip();
91  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
92         bt == BoolTest::ne, "canonical test is expected");
93#endif
94
95  Node* init_n = cl->init_trip();
96  Node* limit_n = cl->limit();
97  if (init_n != NULL && limit_n != NULL) {
98    // Use longs to avoid integer overflow.
99    int stride_con = cl->stride_con();
100    jlong init_con = phase->_igvn.type(init_n)->is_int()->_lo;
101    jlong limit_con = phase->_igvn.type(limit_n)->is_int()->_hi;
102    int stride_m   = stride_con - (stride_con > 0 ? 1 : -1);
103    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
104    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
105      if (init_n->is_Con() && limit_n->is_Con()) {
106        // Set exact trip count.
107        cl->set_exact_trip_count((uint)trip_count);
108      } else if (cl->unrolled_count() == 1) {
109        // Set maximum trip count before unrolling.
110        cl->set_trip_count((uint)trip_count);
111      }
112    }
113  }
114}
115
116//------------------------------compute_profile_trip_cnt----------------------------
117// Compute loop trip count from profile data as
118//    (backedge_count + loop_exit_count) / loop_exit_count
119void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
120  if (!_head->is_CountedLoop()) {
121    return;
122  }
123  CountedLoopNode* head = _head->as_CountedLoop();
124  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
125    return; // Already computed
126  }
127  float trip_cnt = (float)max_jint; // default is big
128
129  Node* back = head->in(LoopNode::LoopBackControl);
130  while (back != head) {
131    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
132        back->in(0) &&
133        back->in(0)->is_If() &&
134        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
135        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
136      break;
137    }
138    back = phase->idom(back);
139  }
140  if (back != head) {
141    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
142           back->in(0), "if-projection exists");
143    IfNode* back_if = back->in(0)->as_If();
144    float loop_back_cnt = back_if->_fcnt * back_if->_prob;
145
146    // Now compute a loop exit count
147    float loop_exit_cnt = 0.0f;
148    for( uint i = 0; i < _body.size(); i++ ) {
149      Node *n = _body[i];
150      if( n->is_If() ) {
151        IfNode *iff = n->as_If();
152        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
153          Node *exit = is_loop_exit(iff);
154          if( exit ) {
155            float exit_prob = iff->_prob;
156            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
157            if (exit_prob > PROB_MIN) {
158              float exit_cnt = iff->_fcnt * exit_prob;
159              loop_exit_cnt += exit_cnt;
160            }
161          }
162        }
163      }
164    }
165    if (loop_exit_cnt > 0.0f) {
166      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
167    } else {
168      // No exit count so use
169      trip_cnt = loop_back_cnt;
170    }
171  }
172#ifndef PRODUCT
173  if (TraceProfileTripCount) {
174    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f\n", head->_idx, trip_cnt);
175  }
176#endif
177  head->set_profile_trip_cnt(trip_cnt);
178}
179
180//---------------------is_invariant_addition-----------------------------
181// Return nonzero index of invariant operand for an Add or Sub
182// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
183int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
184  int op = n->Opcode();
185  if (op == Op_AddI || op == Op_SubI) {
186    bool in1_invar = this->is_invariant(n->in(1));
187    bool in2_invar = this->is_invariant(n->in(2));
188    if (in1_invar && !in2_invar) return 1;
189    if (!in1_invar && in2_invar) return 2;
190  }
191  return 0;
192}
193
194//---------------------reassociate_add_sub-----------------------------
195// Reassociate invariant add and subtract expressions:
196//
197// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
198// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
199// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
200// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
201// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
202// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
203// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
204// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
205// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
206// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
207// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
208// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
209//
210Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
211  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
212  if (is_invariant(n1)) return NULL;
213  int inv1_idx = is_invariant_addition(n1, phase);
214  if (!inv1_idx) return NULL;
215  // Don't mess with add of constant (igvn moves them to expression tree root.)
216  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
217  Node* inv1 = n1->in(inv1_idx);
218  Node* n2 = n1->in(3 - inv1_idx);
219  int inv2_idx = is_invariant_addition(n2, phase);
220  if (!inv2_idx) return NULL;
221  Node* x    = n2->in(3 - inv2_idx);
222  Node* inv2 = n2->in(inv2_idx);
223
224  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
225  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
226  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
227  if (n1->is_Sub() && inv1_idx == 1) {
228    neg_x    = !neg_x;
229    neg_inv2 = !neg_inv2;
230  }
231  Node* inv1_c = phase->get_ctrl(inv1);
232  Node* inv2_c = phase->get_ctrl(inv2);
233  Node* n_inv1;
234  if (neg_inv1) {
235    Node *zero = phase->_igvn.intcon(0);
236    phase->set_ctrl(zero, phase->C->root());
237    n_inv1 = new SubINode(zero, inv1);
238    phase->register_new_node(n_inv1, inv1_c);
239  } else {
240    n_inv1 = inv1;
241  }
242  Node* inv;
243  if (neg_inv2) {
244    inv = new SubINode(n_inv1, inv2);
245  } else {
246    inv = new AddINode(n_inv1, inv2);
247  }
248  phase->register_new_node(inv, phase->get_early_ctrl(inv));
249
250  Node* addx;
251  if (neg_x) {
252    addx = new SubINode(inv, x);
253  } else {
254    addx = new AddINode(x, inv);
255  }
256  phase->register_new_node(addx, phase->get_ctrl(x));
257  phase->_igvn.replace_node(n1, addx);
258  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
259  _body.yank(n1);
260  return addx;
261}
262
263//---------------------reassociate_invariants-----------------------------
264// Reassociate invariant expressions:
265void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
266  for (int i = _body.size() - 1; i >= 0; i--) {
267    Node *n = _body.at(i);
268    for (int j = 0; j < 5; j++) {
269      Node* nn = reassociate_add_sub(n, phase);
270      if (nn == NULL) break;
271      n = nn; // again
272    };
273  }
274}
275
276//------------------------------policy_peeling---------------------------------
277// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
278// make some loop-invariant test (usually a null-check) happen before the loop.
279bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
280  Node *test = ((IdealLoopTree*)this)->tail();
281  int  body_size = ((IdealLoopTree*)this)->_body.size();
282  // Peeling does loop cloning which can result in O(N^2) node construction
283  if( body_size > 255 /* Prevent overflow for large body_size */
284      || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
285    return false;           // too large to safely clone
286  }
287
288  // check for vectorized loops, any peeling done was already applied
289  if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
290
291  while( test != _head ) {      // Scan till run off top of loop
292    if( test->is_If() ) {       // Test?
293      Node *ctrl = phase->get_ctrl(test->in(1));
294      if (ctrl->is_top())
295        return false;           // Found dead test on live IF?  No peeling!
296      // Standard IF only has one input value to check for loop invariance
297      assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
298      // Condition is not a member of this loop?
299      if( !is_member(phase->get_loop(ctrl)) &&
300          is_loop_exit(test) )
301        return true;            // Found reason to peel!
302    }
303    // Walk up dominators to loop _head looking for test which is
304    // executed on every path thru loop.
305    test = phase->idom(test);
306  }
307  return false;
308}
309
310//------------------------------peeled_dom_test_elim---------------------------
311// If we got the effect of peeling, either by actually peeling or by making
312// a pre-loop which must execute at least once, we can remove all
313// loop-invariant dominated tests in the main body.
314void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
315  bool progress = true;
316  while( progress ) {
317    progress = false;           // Reset for next iteration
318    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
319    Node *test = prev->in(0);
320    while( test != loop->_head ) { // Scan till run off top of loop
321
322      int p_op = prev->Opcode();
323      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
324          test->is_If() &&      // Test?
325          !test->in(1)->is_Con() && // And not already obvious?
326          // Condition is not a member of this loop?
327          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
328        // Walk loop body looking for instances of this test
329        for( uint i = 0; i < loop->_body.size(); i++ ) {
330          Node *n = loop->_body.at(i);
331          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
332            // IfNode was dominated by version in peeled loop body
333            progress = true;
334            dominated_by( old_new[prev->_idx], n );
335          }
336        }
337      }
338      prev = test;
339      test = idom(test);
340    } // End of scan tests in loop
341
342  } // End of while( progress )
343}
344
345//------------------------------do_peeling-------------------------------------
346// Peel the first iteration of the given loop.
347// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
348//         The pre-loop illegally has 2 control users (old & new loops).
349// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
350//         Do this by making the old-loop fall-in edges act as if they came
351//         around the loopback from the prior iteration (follow the old-loop
352//         backedges) and then map to the new peeled iteration.  This leaves
353//         the pre-loop with only 1 user (the new peeled iteration), but the
354//         peeled-loop backedge has 2 users.
355// Step 3: Cut the backedge on the clone (so its not a loop) and remove the
356//         extra backedge user.
357//
358//                   orig
359//
360//                  stmt1
361//                    |
362//                    v
363//              loop predicate
364//                    |
365//                    v
366//                   loop<----+
367//                     |      |
368//                   stmt2    |
369//                     |      |
370//                     v      |
371//                    if      ^
372//                   / \      |
373//                  /   \     |
374//                 v     v    |
375//               false true   |
376//               /       \    |
377//              /         ----+
378//             |
379//             v
380//           exit
381//
382//
383//            after clone loop
384//
385//                   stmt1
386//                     |
387//                     v
388//               loop predicate
389//                 /       \
390//        clone   /         \   orig
391//               /           \
392//              /             \
393//             v               v
394//   +---->loop clone          loop<----+
395//   |      |                    |      |
396//   |    stmt2 clone          stmt2    |
397//   |      |                    |      |
398//   |      v                    v      |
399//   ^      if clone            If      ^
400//   |      / \                / \      |
401//   |     /   \              /   \     |
402//   |    v     v            v     v    |
403//   |    true  false      false true   |
404//   |    /         \      /       \    |
405//   +----           \    /         ----+
406//                    \  /
407//                    1v v2
408//                  region
409//                     |
410//                     v
411//                   exit
412//
413//
414//         after peel and predicate move
415//
416//                   stmt1
417//                    /
418//                   /
419//        clone     /            orig
420//                 /
421//                /              +----------+
422//               /               |          |
423//              /          loop predicate   |
424//             /                 |          |
425//            v                  v          |
426//   TOP-->loop clone          loop<----+   |
427//          |                    |      |   |
428//        stmt2 clone          stmt2    |   |
429//          |                    |      |   ^
430//          v                    v      |   |
431//          if clone            If      ^   |
432//          / \                / \      |   |
433//         /   \              /   \     |   |
434//        v     v            v     v    |   |
435//      true   false      false  true   |   |
436//        |         \      /       \    |   |
437//        |          \    /         ----+   ^
438//        |           \  /                  |
439//        |           1v v2                 |
440//        v         region                  |
441//        |            |                    |
442//        |            v                    |
443//        |          exit                   |
444//        |                                 |
445//        +--------------->-----------------+
446//
447//
448//              final graph
449//
450//                  stmt1
451//                    |
452//                    v
453//                  stmt2 clone
454//                    |
455//                    v
456//                   if clone
457//                  / |
458//                 /  |
459//                v   v
460//            false  true
461//             |      |
462//             |      v
463//             | loop predicate
464//             |      |
465//             |      v
466//             |     loop<----+
467//             |      |       |
468//             |    stmt2     |
469//             |      |       |
470//             |      v       |
471//             v      if      ^
472//             |     /  \     |
473//             |    /    \    |
474//             |   v     v    |
475//             | false  true  |
476//             |  |        \  |
477//             v  v         --+
478//            region
479//              |
480//              v
481//             exit
482//
483void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
484
485  C->set_major_progress();
486  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
487  // 'pre' loop from the main and the 'pre' can no longer have its
488  // iterations adjusted.  Therefore, we need to declare this loop as
489  // no longer a 'main' loop; it will need new pre and post loops before
490  // we can do further RCE.
491#ifndef PRODUCT
492  if (TraceLoopOpts) {
493    tty->print("Peel         ");
494    loop->dump_head();
495  }
496#endif
497  Node* head = loop->_head;
498  bool counted_loop = head->is_CountedLoop();
499  if (counted_loop) {
500    CountedLoopNode *cl = head->as_CountedLoop();
501    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
502    cl->set_trip_count(cl->trip_count() - 1);
503    if (cl->is_main_loop()) {
504      cl->set_normal_loop();
505#ifndef PRODUCT
506      if (PrintOpto && VerifyLoopOptimizations) {
507        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
508        loop->dump_head();
509      }
510#endif
511    }
512  }
513  Node* entry = head->in(LoopNode::EntryControl);
514
515  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
516  //         The pre-loop illegally has 2 control users (old & new loops).
517  clone_loop( loop, old_new, dom_depth(head) );
518
519  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
520  //         Do this by making the old-loop fall-in edges act as if they came
521  //         around the loopback from the prior iteration (follow the old-loop
522  //         backedges) and then map to the new peeled iteration.  This leaves
523  //         the pre-loop with only 1 user (the new peeled iteration), but the
524  //         peeled-loop backedge has 2 users.
525  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
526  _igvn.hash_delete(head);
527  head->set_req(LoopNode::EntryControl, new_entry);
528  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
529    Node* old = head->fast_out(j);
530    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
531      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
532      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
533        // Then loop body backedge value remains the same.
534        new_exit_value = old->in(LoopNode::LoopBackControl);
535      _igvn.hash_delete(old);
536      old->set_req(LoopNode::EntryControl, new_exit_value);
537    }
538  }
539
540
541  // Step 3: Cut the backedge on the clone (so its not a loop) and remove the
542  //         extra backedge user.
543  Node* new_head = old_new[head->_idx];
544  _igvn.hash_delete(new_head);
545  new_head->set_req(LoopNode::LoopBackControl, C->top());
546  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
547    Node* use = new_head->fast_out(j2);
548    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
549      _igvn.hash_delete(use);
550      use->set_req(LoopNode::LoopBackControl, C->top());
551    }
552  }
553
554
555  // Step 4: Correct dom-depth info.  Set to loop-head depth.
556  int dd = dom_depth(head);
557  set_idom(head, head->in(1), dd);
558  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
559    Node *old = loop->_body.at(j3);
560    Node *nnn = old_new[old->_idx];
561    if (!has_ctrl(nnn))
562      set_idom(nnn, idom(nnn), dd-1);
563  }
564
565  // Now force out all loop-invariant dominating tests.  The optimizer
566  // finds some, but we _know_ they are all useless.
567  peeled_dom_test_elim(loop,old_new);
568
569  loop->record_for_igvn();
570}
571
572#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop
573
574//------------------------------policy_maximally_unroll------------------------
575// Calculate exact loop trip count and return true if loop can be maximally
576// unrolled.
577bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
578  CountedLoopNode *cl = _head->as_CountedLoop();
579  assert(cl->is_normal_loop(), "");
580  if (!cl->is_valid_counted_loop())
581    return false; // Malformed counted loop
582
583  if (!cl->has_exact_trip_count()) {
584    // Trip count is not exact.
585    return false;
586  }
587
588  uint trip_count = cl->trip_count();
589  // Note, max_juint is used to indicate unknown trip count.
590  assert(trip_count > 1, "one iteration loop should be optimized out already");
591  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");
592
593  // Real policy: if we maximally unroll, does it get too big?
594  // Allow the unrolled mess to get larger than standard loop
595  // size.  After all, it will no longer be a loop.
596  uint body_size    = _body.size();
597  uint unroll_limit = (uint)LoopUnrollLimit * 4;
598  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
599  if (trip_count > unroll_limit || body_size > unroll_limit) {
600    return false;
601  }
602
603  // Fully unroll a loop with few iterations regardless next
604  // conditions since following loop optimizations will split
605  // such loop anyway (pre-main-post).
606  if (trip_count <= 3)
607    return true;
608
609  // Take into account that after unroll conjoined heads and tails will fold,
610  // otherwise policy_unroll() may allow more unrolling than max unrolling.
611  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
612  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
613  if (body_size != tst_body_size) // Check for int overflow
614    return false;
615  if (new_body_size > unroll_limit ||
616      // Unrolling can result in a large amount of node construction
617      new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
618    return false;
619  }
620
621  // Do not unroll a loop with String intrinsics code.
622  // String intrinsics are large and have loops.
623  for (uint k = 0; k < _body.size(); k++) {
624    Node* n = _body.at(k);
625    switch (n->Opcode()) {
626      case Op_StrComp:
627      case Op_StrEquals:
628      case Op_StrIndexOf:
629      case Op_StrIndexOfChar:
630      case Op_EncodeISOArray:
631      case Op_AryEq:
632      case Op_HasNegatives: {
633        return false;
634      }
635#if INCLUDE_RTM_OPT
636      case Op_FastLock:
637      case Op_FastUnlock: {
638        // Don't unroll RTM locking code because it is large.
639        if (UseRTMLocking) {
640          return false;
641        }
642      }
643#endif
644    } // switch
645  }
646
647  return true; // Do maximally unroll
648}
649
650
651//------------------------------policy_unroll----------------------------------
652// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
653// the loop is a CountedLoop and the body is small enough.
654bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {
655
656  CountedLoopNode *cl = _head->as_CountedLoop();
657  assert(cl->is_normal_loop() || cl->is_main_loop(), "");
658
659  if (!cl->is_valid_counted_loop())
660    return false; // Malformed counted loop
661
662  // Protect against over-unrolling.
663  // After split at least one iteration will be executed in pre-loop.
664  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
665
666  _local_loop_unroll_limit = LoopUnrollLimit;
667  _local_loop_unroll_factor = 4;
668  int future_unroll_ct = cl->unrolled_count() * 2;
669  if (!cl->do_unroll_only()) {
670    if (future_unroll_ct > LoopMaxUnroll) return false;
671  } else {
672    // obey user constraints on vector mapped loops with additional unrolling applied
673    int unroll_constraint = (cl->slp_max_unroll()) ? cl->slp_max_unroll() : 1;
674    if ((future_unroll_ct / unroll_constraint) > LoopMaxUnroll) return false;
675  }
676
677  // Check for initial stride being a small enough constant
678  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
679
680  // Don't unroll if the next round of unrolling would push us
681  // over the expected trip count of the loop.  One is subtracted
682  // from the expected trip count because the pre-loop normally
683  // executes 1 iteration.
684  if (UnrollLimitForProfileCheck > 0 &&
685      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
686      future_unroll_ct        > UnrollLimitForProfileCheck &&
687      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
688    return false;
689  }
690
691  // When unroll count is greater than LoopUnrollMin, don't unroll if:
692  //   the residual iterations are more than 10% of the trip count
693  //   and rounds of "unroll,optimize" are not making significant progress
694  //   Progress defined as current size less than 20% larger than previous size.
695  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
696      future_unroll_ct > LoopUnrollMin &&
697      (future_unroll_ct - 1) * (100 / LoopPercentProfileLimit) > cl->profile_trip_cnt() &&
698      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
699    return false;
700  }
701
702  Node *init_n = cl->init_trip();
703  Node *limit_n = cl->limit();
704  int stride_con = cl->stride_con();
705  // Non-constant bounds.
706  // Protect against over-unrolling when init or/and limit are not constant
707  // (so that trip_count's init value is maxint) but iv range is known.
708  if (init_n   == NULL || !init_n->is_Con()  ||
709      limit_n  == NULL || !limit_n->is_Con()) {
710    Node* phi = cl->phi();
711    if (phi != NULL) {
712      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
713      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
714      int next_stride = stride_con * 2; // stride after this unroll
715      if (next_stride > 0) {
716        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
717            iv_type->_lo + next_stride >  iv_type->_hi) {
718          return false;  // over-unrolling
719        }
720      } else if (next_stride < 0) {
721        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
722            iv_type->_hi + next_stride <  iv_type->_lo) {
723          return false;  // over-unrolling
724        }
725      }
726    }
727  }
728
729  // After unroll limit will be adjusted: new_limit = limit-stride.
730  // Bailout if adjustment overflow.
731  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
732  if ((stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi)) ||
733      (stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo)))
734    return false;  // overflow
735
736  // Adjust body_size to determine if we unroll or not
737  uint body_size = _body.size();
738  // Key test to unroll loop in CRC32 java code
739  int xors_in_loop = 0;
740  // Also count ModL, DivL and MulL which expand mightly
741  for (uint k = 0; k < _body.size(); k++) {
742    Node* n = _body.at(k);
743    switch (n->Opcode()) {
744      case Op_XorI: xors_in_loop++; break; // CRC32 java code
745      case Op_ModL: body_size += 30; break;
746      case Op_DivL: body_size += 30; break;
747      case Op_MulL: body_size += 10; break;
748      case Op_StrComp:
749      case Op_StrEquals:
750      case Op_StrIndexOf:
751      case Op_StrIndexOfChar:
752      case Op_EncodeISOArray:
753      case Op_AryEq:
754      case Op_HasNegatives: {
755        // Do not unroll a loop with String intrinsics code.
756        // String intrinsics are large and have loops.
757        return false;
758      }
759#if INCLUDE_RTM_OPT
760      case Op_FastLock:
761      case Op_FastUnlock: {
762        // Don't unroll RTM locking code because it is large.
763        if (UseRTMLocking) {
764          return false;
765        }
766      }
767#endif
768    } // switch
769  }
770
771  if (UseSuperWord) {
772    if (!cl->is_reduction_loop()) {
773      phase->mark_reductions(this);
774    }
775
776    // Only attempt slp analysis when user controls do not prohibit it
777    if (LoopMaxUnroll > _local_loop_unroll_factor) {
778      // Once policy_slp_analysis succeeds, mark the loop with the
779      // maximal unroll factor so that we minimize analysis passes
780      if (future_unroll_ct >= _local_loop_unroll_factor) {
781        policy_unroll_slp_analysis(cl, phase, future_unroll_ct);
782      }
783    }
784  }
785
786  int slp_max_unroll_factor = cl->slp_max_unroll();
787  if ((LoopMaxUnroll < slp_max_unroll_factor) && FLAG_IS_DEFAULT(LoopMaxUnroll) && UseSubwordForMaxVector) {
788    LoopMaxUnroll = slp_max_unroll_factor;
789  }
790  if (cl->has_passed_slp()) {
791    if (slp_max_unroll_factor >= future_unroll_ct) return true;
792    // Normal case: loop too big
793    return false;
794  }
795
796  // Check for being too big
797  if (body_size > (uint)_local_loop_unroll_limit) {
798    if ((UseSubwordForMaxVector || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) return true;
799    // Normal case: loop too big
800    return false;
801  }
802
803  if (cl->do_unroll_only()) {
804    if (TraceSuperWordLoopUnrollAnalysis) {
805      tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)\n", slp_max_unroll_factor, future_unroll_ct);
806    }
807  }
808
809  // Unroll once!  (Each trip will soon do double iterations)
810  return true;
811}
812
813void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLoop *phase, int future_unroll_ct) {
814  // Enable this functionality target by target as needed
815  if (SuperWordLoopUnrollAnalysis) {
816    if (!cl->was_slp_analyzed()) {
817      SuperWord sw(phase);
818      sw.transform_loop(this, false);
819
820      // If the loop is slp canonical analyze it
821      if (sw.early_return() == false) {
822        sw.unrolling_analysis(_local_loop_unroll_factor);
823      }
824    }
825
826    if (cl->has_passed_slp()) {
827      int slp_max_unroll_factor = cl->slp_max_unroll();
828      if (slp_max_unroll_factor >= future_unroll_ct) {
829        int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor;
830        if (new_limit > LoopUnrollLimit) {
831          if (TraceSuperWordLoopUnrollAnalysis) {
832            tty->print_cr("slp analysis unroll=%d, default limit=%d\n", new_limit, _local_loop_unroll_limit);
833          }
834          _local_loop_unroll_limit = new_limit;
835        }
836      }
837    }
838  }
839}
840
841//------------------------------policy_align-----------------------------------
842// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
843// expression that does the alignment.  Note that only one array base can be
844// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
845// if we vectorize short memory ops into longer memory ops, we may want to
846// increase alignment.
847bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
848  return false;
849}
850
851//------------------------------policy_range_check-----------------------------
852// Return TRUE or FALSE if the loop should be range-check-eliminated.
853// Actually we do iteration-splitting, a more powerful form of RCE.
854bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
855  if (!RangeCheckElimination) return false;
856
857  CountedLoopNode *cl = _head->as_CountedLoop();
858  // If we unrolled with no intention of doing RCE and we later
859  // changed our minds, we got no pre-loop.  Either we need to
860  // make a new pre-loop, or we gotta disallow RCE.
861  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
862  Node *trip_counter = cl->phi();
863
864  // check for vectorized loops, some opts are no longer needed
865  if (cl->do_unroll_only()) return false;
866
867  // Check loop body for tests of trip-counter plus loop-invariant vs
868  // loop-invariant.
869  for (uint i = 0; i < _body.size(); i++) {
870    Node *iff = _body[i];
871    if (iff->Opcode() == Op_If ||
872        iff->Opcode() == Op_RangeCheck) { // Test?
873
874      // Comparing trip+off vs limit
875      Node *bol = iff->in(1);
876      if (bol->req() != 2) continue; // dead constant test
877      if (!bol->is_Bool()) {
878        assert(bol->Opcode() == Op_Conv2B, "predicate check only");
879        continue;
880      }
881      if (bol->as_Bool()->_test._test == BoolTest::ne)
882        continue; // not RC
883
884      Node *cmp = bol->in(1);
885      Node *rc_exp = cmp->in(1);
886      Node *limit = cmp->in(2);
887
888      Node *limit_c = phase->get_ctrl(limit);
889      if( limit_c == phase->C->top() )
890        return false;           // Found dead test on live IF?  No RCE!
891      if( is_member(phase->get_loop(limit_c) ) ) {
892        // Compare might have operands swapped; commute them
893        rc_exp = cmp->in(2);
894        limit  = cmp->in(1);
895        limit_c = phase->get_ctrl(limit);
896        if( is_member(phase->get_loop(limit_c) ) )
897          continue;             // Both inputs are loop varying; cannot RCE
898      }
899
900      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
901        continue;
902      }
903      // Yeah!  Found a test like 'trip+off vs limit'
904      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
905      // we need loop unswitching instead of iteration splitting.
906      if( is_loop_exit(iff) )
907        return true;            // Found reason to split iterations
908    } // End of is IF
909  }
910
911  return false;
912}
913
914//------------------------------policy_peel_only-------------------------------
915// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
916// for unrolling loops with NO array accesses.
917bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {
918  // check for vectorized loops, any peeling done was already applied
919  if (_head->is_CountedLoop() && _head->as_CountedLoop()->do_unroll_only()) return false;
920
921  for( uint i = 0; i < _body.size(); i++ )
922    if( _body[i]->is_Mem() )
923      return false;
924
925  // No memory accesses at all!
926  return true;
927}
928
929//------------------------------clone_up_backedge_goo--------------------------
930// If Node n lives in the back_ctrl block and cannot float, we clone a private
931// version of n in preheader_ctrl block and return that, otherwise return n.
932Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
933  if( get_ctrl(n) != back_ctrl ) return n;
934
935  // Only visit once
936  if (visited.test_set(n->_idx)) {
937    Node *x = clones.find(n->_idx);
938    if (x != NULL)
939      return x;
940    return n;
941  }
942
943  Node *x = NULL;               // If required, a clone of 'n'
944  // Check for 'n' being pinned in the backedge.
945  if( n->in(0) && n->in(0) == back_ctrl ) {
946    assert(clones.find(n->_idx) == NULL, "dead loop");
947    x = n->clone();             // Clone a copy of 'n' to preheader
948    clones.push(x, n->_idx);
949    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
950  }
951
952  // Recursive fixup any other input edges into x.
953  // If there are no changes we can just return 'n', otherwise
954  // we need to clone a private copy and change it.
955  for( uint i = 1; i < n->req(); i++ ) {
956    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
957    if( g != n->in(i) ) {
958      if( !x ) {
959        assert(clones.find(n->_idx) == NULL, "dead loop");
960        x = n->clone();
961        clones.push(x, n->_idx);
962      }
963      x->set_req(i, g);
964    }
965  }
966  if( x ) {                     // x can legally float to pre-header location
967    register_new_node( x, preheader_ctrl );
968    return x;
969  } else {                      // raise n to cover LCA of uses
970    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
971  }
972  return n;
973}
974
975bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
976  Node* castii = new CastIINode(incr, TypeInt::INT, true);
977  castii->set_req(0, ctrl);
978  register_new_node(castii, ctrl);
979  for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
980    Node* n = incr->fast_out(i);
981    if (n->is_Phi() && n->in(0) == loop) {
982      int nrep = n->replace_edge(incr, castii);
983      return true;
984    }
985  }
986  return false;
987}
988
989//------------------------------insert_pre_post_loops--------------------------
990// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
991// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
992// alignment.  Useful to unroll loops that do no array accesses.
993void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {
994
995#ifndef PRODUCT
996  if (TraceLoopOpts) {
997    if (peel_only)
998      tty->print("PeelMainPost ");
999    else
1000      tty->print("PreMainPost  ");
1001    loop->dump_head();
1002  }
1003#endif
1004  C->set_major_progress();
1005
1006  // Find common pieces of the loop being guarded with pre & post loops
1007  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
1008  assert( main_head->is_normal_loop(), "" );
1009  CountedLoopEndNode *main_end = main_head->loopexit();
1010  guarantee(main_end != NULL, "no loop exit node");
1011  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
1012  uint dd_main_head = dom_depth(main_head);
1013  uint max = main_head->outcnt();
1014
1015  Node *pre_header= main_head->in(LoopNode::EntryControl);
1016  Node *init      = main_head->init_trip();
1017  Node *incr      = main_end ->incr();
1018  Node *limit     = main_end ->limit();
1019  Node *stride    = main_end ->stride();
1020  Node *cmp       = main_end ->cmp_node();
1021  BoolTest::mask b_test = main_end->test_trip();
1022
1023  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
1024  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
1025  if( bol->outcnt() != 1 ) {
1026    bol = bol->clone();
1027    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
1028    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, bol);
1029  }
1030  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
1031  if( cmp->outcnt() != 1 ) {
1032    cmp = cmp->clone();
1033    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
1034    _igvn.replace_input_of(bol, 1, cmp);
1035  }
1036
1037  // Add the post loop
1038  CountedLoopNode *post_head = NULL;
1039  Node *main_exit = insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
1040
1041  //------------------------------
1042  // Step B: Create Pre-Loop.
1043
1044  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
1045  // loop pre-header illegally has 2 control users (old & new loops).
1046  clone_loop( loop, old_new, dd_main_head );
1047  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
1048  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
1049  pre_head->set_pre_loop(main_head);
1050  Node *pre_incr = old_new[incr->_idx];
1051
1052  // Reduce the pre-loop trip count.
1053  pre_end->_prob = PROB_FAIR;
1054
1055  // Find the pre-loop normal exit.
1056  Node* pre_exit = pre_end->proj_out(false);
1057  assert( pre_exit->Opcode() == Op_IfFalse, "" );
1058  IfFalseNode *new_pre_exit = new IfFalseNode(pre_end);
1059  _igvn.register_new_node_with_optimizer( new_pre_exit );
1060  set_idom(new_pre_exit, pre_end, dd_main_head);
1061  set_loop(new_pre_exit, loop->_parent);
1062
1063  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
1064  // pre-loop, the main-loop may not execute at all.  Later in life this
1065  // zero-trip guard will become the minimum-trip guard when we unroll
1066  // the main-loop.
1067  Node *min_opaq = new Opaque1Node(C, limit);
1068  Node *min_cmp  = new CmpINode( pre_incr, min_opaq );
1069  Node *min_bol  = new BoolNode( min_cmp, b_test );
1070  register_new_node( min_opaq, new_pre_exit );
1071  register_new_node( min_cmp , new_pre_exit );
1072  register_new_node( min_bol , new_pre_exit );
1073
1074  // Build the IfNode (assume the main-loop is executed always).
1075  IfNode *min_iff = new IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
1076  _igvn.register_new_node_with_optimizer( min_iff );
1077  set_idom(min_iff, new_pre_exit, dd_main_head);
1078  set_loop(min_iff, loop->_parent);
1079
1080  // Plug in the false-path, taken if we need to skip main-loop
1081  _igvn.hash_delete( pre_exit );
1082  pre_exit->set_req(0, min_iff);
1083  set_idom(pre_exit, min_iff, dd_main_head);
1084  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
1085  // Make the true-path, must enter the main loop
1086  Node *min_taken = new IfTrueNode( min_iff );
1087  _igvn.register_new_node_with_optimizer( min_taken );
1088  set_idom(min_taken, min_iff, dd_main_head);
1089  set_loop(min_taken, loop->_parent);
1090  // Plug in the true path
1091  _igvn.hash_delete( main_head );
1092  main_head->set_req(LoopNode::EntryControl, min_taken);
1093  set_idom(main_head, min_taken, dd_main_head);
1094
1095  Arena *a = Thread::current()->resource_area();
1096  VectorSet visited(a);
1097  Node_Stack clones(a, main_head->back_control()->outcnt());
1098  // Step B3: Make the fall-in values to the main-loop come from the
1099  // fall-out values of the pre-loop.
1100  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
1101    Node* main_phi = main_head->fast_out(i2);
1102    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
1103      Node *pre_phi = old_new[main_phi->_idx];
1104      Node *fallpre  = clone_up_backedge_goo(pre_head->back_control(),
1105                                             main_head->init_control(),
1106                                             pre_phi->in(LoopNode::LoopBackControl),
1107                                             visited, clones);
1108      _igvn.hash_delete(main_phi);
1109      main_phi->set_req( LoopNode::EntryControl, fallpre );
1110    }
1111  }
1112
1113  // Nodes inside the loop may be control dependent on a predicate
1114  // that was moved before the preloop. If the back branch of the main
1115  // or post loops becomes dead, those nodes won't be dependent on the
1116  // test that guards that loop nest anymore which could lead to an
1117  // incorrect array access because it executes independently of the
1118  // test that was guarding the loop nest. We add a special CastII on
1119  // the if branch that enters the loop, between the input induction
1120  // variable value and the induction variable Phi to preserve correct
1121  // dependencies.
1122
1123  // CastII for the main loop:
1124  bool inserted = cast_incr_before_loop( pre_incr, min_taken, main_head );
1125  assert(inserted, "no castII inserted");
1126
1127  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
1128  // RCE and alignment may change this later.
1129  Node *cmp_end = pre_end->cmp_node();
1130  assert( cmp_end->in(2) == limit, "" );
1131  Node *pre_limit = new AddINode( init, stride );
1132
1133  // Save the original loop limit in this Opaque1 node for
1134  // use by range check elimination.
1135  Node *pre_opaq  = new Opaque1Node(C, pre_limit, limit);
1136
1137  register_new_node( pre_limit, pre_head->in(0) );
1138  register_new_node( pre_opaq , pre_head->in(0) );
1139
1140  // Since no other users of pre-loop compare, I can hack limit directly
1141  assert( cmp_end->outcnt() == 1, "no other users" );
1142  _igvn.hash_delete(cmp_end);
1143  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);
1144
1145  // Special case for not-equal loop bounds:
1146  // Change pre loop test, main loop test, and the
1147  // main loop guard test to use lt or gt depending on stride
1148  // direction:
1149  // positive stride use <
1150  // negative stride use >
1151  //
1152  // not-equal test is kept for post loop to handle case
1153  // when init > limit when stride > 0 (and reverse).
1154
1155  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
1156
1157    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
1158    // Modify pre loop end condition
1159    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
1160    BoolNode* new_bol0 = new BoolNode(pre_bol->in(1), new_test);
1161    register_new_node( new_bol0, pre_head->in(0) );
1162    _igvn.replace_input_of(pre_end, CountedLoopEndNode::TestValue, new_bol0);
1163    // Modify main loop guard condition
1164    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
1165    BoolNode* new_bol1 = new BoolNode(min_bol->in(1), new_test);
1166    register_new_node( new_bol1, new_pre_exit );
1167    _igvn.hash_delete(min_iff);
1168    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
1169    // Modify main loop end condition
1170    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
1171    BoolNode* new_bol2 = new BoolNode(main_bol->in(1), new_test);
1172    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
1173    _igvn.replace_input_of(main_end, CountedLoopEndNode::TestValue, new_bol2);
1174  }
1175
1176  // Flag main loop
1177  main_head->set_main_loop();
1178  if( peel_only ) main_head->set_main_no_pre_loop();
1179
1180  // Subtract a trip count for the pre-loop.
1181  main_head->set_trip_count(main_head->trip_count() - 1);
1182
1183  // It's difficult to be precise about the trip-counts
1184  // for the pre/post loops.  They are usually very short,
1185  // so guess that 4 trips is a reasonable value.
1186  post_head->set_profile_trip_cnt(4.0);
1187  pre_head->set_profile_trip_cnt(4.0);
1188
1189  // Now force out all loop-invariant dominating tests.  The optimizer
1190  // finds some, but we _know_ they are all useless.
1191  peeled_dom_test_elim(loop,old_new);
1192  loop->record_for_igvn();
1193}
1194
1195//------------------------------insert_vector_post_loop------------------------
1196// Insert a copy of the atomic unrolled vectorized main loop as a post loop,
1197// unroll_policy has already informed us that more unrolling is about to happen to
1198// the main loop.  The resultant post loop will serve as a vectorized drain loop.
1199void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new) {
1200  if (!loop->_head->is_CountedLoop()) return;
1201
1202  CountedLoopNode *cl = loop->_head->as_CountedLoop();
1203
1204  // only process vectorized main loops
1205  if (!cl->is_vectorized_loop() || !cl->is_main_loop()) return;
1206
1207  int slp_max_unroll_factor = cl->slp_max_unroll();
1208  int cur_unroll = cl->unrolled_count();
1209
1210  if (slp_max_unroll_factor == 0) return;
1211
1212  // only process atomic unroll vector loops (not super unrolled after vectorization)
1213  if (cur_unroll != slp_max_unroll_factor) return;
1214
1215  // we only ever process this one time
1216  if (cl->has_atomic_post_loop()) return;
1217
1218#ifndef PRODUCT
1219  if (TraceLoopOpts) {
1220    tty->print("PostVector  ");
1221    loop->dump_head();
1222  }
1223#endif
1224  C->set_major_progress();
1225
1226  // Find common pieces of the loop being guarded with pre & post loops
1227  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
1228  CountedLoopEndNode *main_end = main_head->loopexit();
1229  guarantee(main_end != NULL, "no loop exit node");
1230  // diagnostic to show loop end is not properly formed
1231  assert(main_end->outcnt() == 2, "1 true, 1 false path only");
1232
1233  // mark this loop as processed
1234  main_head->mark_has_atomic_post_loop();
1235
1236  Node *incr = main_end->incr();
1237  Node *limit = main_end->limit();
1238
1239  // In this case we throw away the result as we are not using it to connect anything else.
1240  CountedLoopNode *post_head = NULL;
1241  insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
1242
1243  // It's difficult to be precise about the trip-counts
1244  // for post loops.  They are usually very short,
1245  // so guess that unit vector trips is a reasonable value.
1246  post_head->set_profile_trip_cnt(cur_unroll);
1247
1248  // Now force out all loop-invariant dominating tests.  The optimizer
1249  // finds some, but we _know_ they are all useless.
1250  peeled_dom_test_elim(loop, old_new);
1251  loop->record_for_igvn();
1252}
1253
1254
1255//-------------------------insert_scalar_rced_post_loop------------------------
1256// Insert a copy of the rce'd main loop as a post loop,
1257// We have not unrolled the main loop, so this is the right time to inject this.
1258// Later we will examine the partner of this post loop pair which still has range checks
1259// to see inject code which tests at runtime if the range checks are applicable.
1260void PhaseIdealLoop::insert_scalar_rced_post_loop(IdealLoopTree *loop, Node_List &old_new) {
1261  if (!loop->_head->is_CountedLoop()) return;
1262
1263  CountedLoopNode *cl = loop->_head->as_CountedLoop();
1264
1265  // only process RCE'd main loops
1266  if (!cl->is_main_loop() || cl->range_checks_present()) return;
1267
1268#ifndef PRODUCT
1269  if (TraceLoopOpts) {
1270    tty->print("PostScalarRce  ");
1271    loop->dump_head();
1272  }
1273#endif
1274  C->set_major_progress();
1275
1276  // Find common pieces of the loop being guarded with pre & post loops
1277  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
1278  CountedLoopEndNode *main_end = main_head->loopexit();
1279  guarantee(main_end != NULL, "no loop exit node");
1280  // diagnostic to show loop end is not properly formed
1281  assert(main_end->outcnt() == 2, "1 true, 1 false path only");
1282
1283  Node *incr = main_end->incr();
1284  Node *limit = main_end->limit();
1285
1286  // In this case we throw away the result as we are not using it to connect anything else.
1287  CountedLoopNode *post_head = NULL;
1288  insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head);
1289
1290  // It's difficult to be precise about the trip-counts
1291  // for post loops.  They are usually very short,
1292  // so guess that unit vector trips is a reasonable value.
1293  post_head->set_profile_trip_cnt(4.0);
1294  post_head->set_is_rce_post_loop();
1295
1296  // Now force out all loop-invariant dominating tests.  The optimizer
1297  // finds some, but we _know_ they are all useless.
1298  peeled_dom_test_elim(loop, old_new);
1299  loop->record_for_igvn();
1300}
1301
1302
1303//------------------------------insert_post_loop-------------------------------
1304// Insert post loops.  Add a post loop to the given loop passed.
1305Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree *loop, Node_List &old_new,
1306                                       CountedLoopNode *main_head, CountedLoopEndNode *main_end,
1307                                       Node *incr, Node *limit, CountedLoopNode *&post_head) {
1308
1309  //------------------------------
1310  // Step A: Create a new post-Loop.
1311  Node* main_exit = main_end->proj_out(false);
1312  assert(main_exit->Opcode() == Op_IfFalse, "");
1313  int dd_main_exit = dom_depth(main_exit);
1314
1315  // Step A1: Clone the loop body of main. The clone becomes the post-loop.
1316  // The main loop pre-header illegally has 2 control users (old & new loops).
1317  clone_loop(loop, old_new, dd_main_exit);
1318  assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, "");
1319  post_head = old_new[main_head->_idx]->as_CountedLoop();
1320  post_head->set_normal_loop();
1321  post_head->set_post_loop(main_head);
1322
1323  // Reduce the post-loop trip count.
1324  CountedLoopEndNode* post_end = old_new[main_end->_idx]->as_CountedLoopEnd();
1325  post_end->_prob = PROB_FAIR;
1326
1327  // Build the main-loop normal exit.
1328  IfFalseNode *new_main_exit = new IfFalseNode(main_end);
1329  _igvn.register_new_node_with_optimizer(new_main_exit);
1330  set_idom(new_main_exit, main_end, dd_main_exit);
1331  set_loop(new_main_exit, loop->_parent);
1332
1333  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
1334  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
1335  // (the previous loop trip-counter exit value) because we will be changing
1336  // the exit value (via additional unrolling) so we cannot constant-fold away the zero
1337  // trip guard until all unrolling is done.
1338  Node *zer_opaq = new Opaque1Node(C, incr);
1339  Node *zer_cmp = new CmpINode(zer_opaq, limit);
1340  Node *zer_bol = new BoolNode(zer_cmp, main_end->test_trip());
1341  register_new_node(zer_opaq, new_main_exit);
1342  register_new_node(zer_cmp, new_main_exit);
1343  register_new_node(zer_bol, new_main_exit);
1344
1345  // Build the IfNode
1346  IfNode *zer_iff = new IfNode(new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN);
1347  _igvn.register_new_node_with_optimizer(zer_iff);
1348  set_idom(zer_iff, new_main_exit, dd_main_exit);
1349  set_loop(zer_iff, loop->_parent);
1350
1351  // Plug in the false-path, taken if we need to skip this post-loop
1352  _igvn.replace_input_of(main_exit, 0, zer_iff);
1353  set_idom(main_exit, zer_iff, dd_main_exit);
1354  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
1355  // Make the true-path, must enter this post loop
1356  Node *zer_taken = new IfTrueNode(zer_iff);
1357  _igvn.register_new_node_with_optimizer(zer_taken);
1358  set_idom(zer_taken, zer_iff, dd_main_exit);
1359  set_loop(zer_taken, loop->_parent);
1360  // Plug in the true path
1361  _igvn.hash_delete(post_head);
1362  post_head->set_req(LoopNode::EntryControl, zer_taken);
1363  set_idom(post_head, zer_taken, dd_main_exit);
1364
1365  Arena *a = Thread::current()->resource_area();
1366  VectorSet visited(a);
1367  Node_Stack clones(a, main_head->back_control()->outcnt());
1368  // Step A3: Make the fall-in values to the post-loop come from the
1369  // fall-out values of the main-loop.
1370  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
1371    Node* main_phi = main_head->fast_out(i);
1372    if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0) {
1373      Node *cur_phi = old_new[main_phi->_idx];
1374      Node *fallnew = clone_up_backedge_goo(main_head->back_control(),
1375                                            post_head->init_control(),
1376                                            main_phi->in(LoopNode::LoopBackControl),
1377                                            visited, clones);
1378      _igvn.hash_delete(cur_phi);
1379      cur_phi->set_req(LoopNode::EntryControl, fallnew);
1380    }
1381  }
1382
1383  // CastII for the new post loop:
1384  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
1385  assert(inserted, "no castII inserted");
1386
1387  return new_main_exit;
1388}
1389
1390//------------------------------is_invariant-----------------------------
1391// Return true if n is invariant
1392bool IdealLoopTree::is_invariant(Node* n) const {
1393  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
1394  if (n_c->is_top()) return false;
1395  return !is_member(_phase->get_loop(n_c));
1396}
1397
1398
1399//------------------------------do_unroll--------------------------------------
1400// Unroll the loop body one step - make each trip do 2 iterations.
1401void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
1402  assert(LoopUnrollLimit, "");
1403  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
1404  CountedLoopEndNode *loop_end = loop_head->loopexit();
1405  assert(loop_end, "");
1406#ifndef PRODUCT
1407  if (PrintOpto && VerifyLoopOptimizations) {
1408    tty->print("Unrolling ");
1409    loop->dump_head();
1410  } else if (TraceLoopOpts) {
1411    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
1412      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
1413    } else {
1414      tty->print("Unroll %d     ", loop_head->unrolled_count()*2);
1415    }
1416    loop->dump_head();
1417  }
1418
1419  if (C->do_vector_loop() && (PrintOpto && (VerifyLoopOptimizations || TraceLoopOpts))) {
1420    Arena* arena = Thread::current()->resource_area();
1421    Node_Stack stack(arena, C->live_nodes() >> 2);
1422    Node_List rpo_list;
1423    VectorSet visited(arena);
1424    visited.set(loop_head->_idx);
1425    rpo( loop_head, stack, visited, rpo_list );
1426    dump(loop, rpo_list.size(), rpo_list );
1427  }
1428#endif
1429
1430  // Remember loop node count before unrolling to detect
1431  // if rounds of unroll,optimize are making progress
1432  loop_head->set_node_count_before_unroll(loop->_body.size());
1433
1434  Node *ctrl  = loop_head->in(LoopNode::EntryControl);
1435  Node *limit = loop_head->limit();
1436  Node *init  = loop_head->init_trip();
1437  Node *stride = loop_head->stride();
1438
1439  Node *opaq = NULL;
1440  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
1441    // Search for zero-trip guard.
1442
1443    // Check the shape of the graph at the loop entry. If an inappropriate
1444    // graph shape is encountered, the compiler bails out loop unrolling;
1445    // compilation of the method will still succeed.
1446    if (!is_canonical_loop_entry(loop_head)) {
1447      return;
1448    }
1449    opaq = ctrl->in(0)->in(1)->in(1)->in(2);
1450    // Zero-trip test uses an 'opaque' node which is not shared.
1451    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
1452  }
1453
1454  C->set_major_progress();
1455
1456  Node* new_limit = NULL;
1457  int stride_con = stride->get_int();
1458  int stride_p = (stride_con > 0) ? stride_con : -stride_con;
1459  uint old_trip_count = loop_head->trip_count();
1460  // Verify that unroll policy result is still valid.
1461  assert(old_trip_count > 1 &&
1462      (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");
1463
1464  // Adjust loop limit to keep valid iterations number after unroll.
1465  // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
1466  // which may overflow.
1467  if (!adjust_min_trip) {
1468    assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
1469        "odd trip count for maximally unroll");
1470    // Don't need to adjust limit for maximally unroll since trip count is even.
1471  } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
1472    // Loop's limit is constant. Loop's init could be constant when pre-loop
1473    // become peeled iteration.
1474    jlong init_con = init->get_int();
1475    // We can keep old loop limit if iterations count stays the same:
1476    //   old_trip_count == new_trip_count * 2
1477    // Note: since old_trip_count >= 2 then new_trip_count >= 1
1478    // so we also don't need to adjust zero trip test.
1479    jlong limit_con  = limit->get_int();
1480    // (stride_con*2) not overflow since stride_con <= 8.
1481    int new_stride_con = stride_con * 2;
1482    int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
1483    jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
1484    // New trip count should satisfy next conditions.
1485    assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
1486    uint new_trip_count = (uint)trip_count;
1487    adjust_min_trip = (old_trip_count != new_trip_count*2);
1488  }
1489
1490  if (adjust_min_trip) {
1491    // Step 2: Adjust the trip limit if it is called for.
1492    // The adjustment amount is -stride. Need to make sure if the
1493    // adjustment underflows or overflows, then the main loop is skipped.
1494    Node* cmp = loop_end->cmp_node();
1495    assert(cmp->in(2) == limit, "sanity");
1496    assert(opaq != NULL && opaq->in(1) == limit, "sanity");
1497
1498    // Verify that policy_unroll result is still valid.
1499    const TypeInt* limit_type = _igvn.type(limit)->is_int();
1500    assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
1501        stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");
1502
1503    if (limit->is_Con()) {
1504      // The check in policy_unroll and the assert above guarantee
1505      // no underflow if limit is constant.
1506      new_limit = _igvn.intcon(limit->get_int() - stride_con);
1507      set_ctrl(new_limit, C->root());
1508    } else {
1509      // Limit is not constant.
1510      if (loop_head->unrolled_count() == 1) { // only for first unroll
1511        // Separate limit by Opaque node in case it is an incremented
1512        // variable from previous loop to avoid using pre-incremented
1513        // value which could increase register pressure.
1514        // Otherwise reorg_offsets() optimization will create a separate
1515        // Opaque node for each use of trip-counter and as result
1516        // zero trip guard limit will be different from loop limit.
1517        assert(has_ctrl(opaq), "should have it");
1518        Node* opaq_ctrl = get_ctrl(opaq);
1519        limit = new Opaque2Node( C, limit );
1520        register_new_node( limit, opaq_ctrl );
1521      }
1522      if ((stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo)) ||
1523          (stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi))) {
1524        // No underflow.
1525        new_limit = new SubINode(limit, stride);
1526      } else {
1527        // (limit - stride) may underflow.
1528        // Clamp the adjustment value with MININT or MAXINT:
1529        //
1530        //   new_limit = limit-stride
1531        //   if (stride > 0)
1532        //     new_limit = (limit < new_limit) ? MININT : new_limit;
1533        //   else
1534        //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
1535        //
1536        BoolTest::mask bt = loop_end->test_trip();
1537        assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
1538        Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
1539        set_ctrl(adj_max, C->root());
1540        Node* old_limit = NULL;
1541        Node* adj_limit = NULL;
1542        Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
1543        if (loop_head->unrolled_count() > 1 &&
1544            limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
1545            limit->in(CMoveNode::IfTrue) == adj_max &&
1546            bol->as_Bool()->_test._test == bt &&
1547            bol->in(1)->Opcode() == Op_CmpI &&
1548            bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
1549          // Loop was unrolled before.
1550          // Optimize the limit to avoid nested CMove:
1551          // use original limit as old limit.
1552          old_limit = bol->in(1)->in(1);
1553          // Adjust previous adjusted limit.
1554          adj_limit = limit->in(CMoveNode::IfFalse);
1555          adj_limit = new SubINode(adj_limit, stride);
1556        } else {
1557          old_limit = limit;
1558          adj_limit = new SubINode(limit, stride);
1559        }
1560        assert(old_limit != NULL && adj_limit != NULL, "");
1561        register_new_node( adj_limit, ctrl ); // adjust amount
1562        Node* adj_cmp = new CmpINode(old_limit, adj_limit);
1563        register_new_node( adj_cmp, ctrl );
1564        Node* adj_bool = new BoolNode(adj_cmp, bt);
1565        register_new_node( adj_bool, ctrl );
1566        new_limit = new CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
1567      }
1568      register_new_node(new_limit, ctrl);
1569    }
1570    assert(new_limit != NULL, "");
1571    // Replace in loop test.
1572    assert(loop_end->in(1)->in(1) == cmp, "sanity");
1573    if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
1574      // Don't need to create new test since only one user.
1575      _igvn.hash_delete(cmp);
1576      cmp->set_req(2, new_limit);
1577    } else {
1578      // Create new test since it is shared.
1579      Node* ctrl2 = loop_end->in(0);
1580      Node* cmp2  = cmp->clone();
1581      cmp2->set_req(2, new_limit);
1582      register_new_node(cmp2, ctrl2);
1583      Node* bol2 = loop_end->in(1)->clone();
1584      bol2->set_req(1, cmp2);
1585      register_new_node(bol2, ctrl2);
1586      _igvn.replace_input_of(loop_end, 1, bol2);
1587    }
1588    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
1589    // Make it a 1-trip test (means at least 2 trips).
1590
1591    // Guard test uses an 'opaque' node which is not shared.  Hence I
1592    // can edit it's inputs directly.  Hammer in the new limit for the
1593    // minimum-trip guard.
1594    assert(opaq->outcnt() == 1, "");
1595    _igvn.replace_input_of(opaq, 1, new_limit);
1596  }
1597
1598  // Adjust max trip count. The trip count is intentionally rounded
1599  // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
1600  // the main, unrolled, part of the loop will never execute as it is protected
1601  // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
1602  // and later determined that part of the unrolled loop was dead.
1603  loop_head->set_trip_count(old_trip_count / 2);
1604
1605  // Double the count of original iterations in the unrolled loop body.
1606  loop_head->double_unrolled_count();
1607
1608  // ---------
1609  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
1610  // represents the odd iterations; since the loop trips an even number of
1611  // times its backedge is never taken.  Kill the backedge.
1612  uint dd = dom_depth(loop_head);
1613  clone_loop( loop, old_new, dd );
1614
1615  // Make backedges of the clone equal to backedges of the original.
1616  // Make the fall-in from the original come from the fall-out of the clone.
1617  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
1618    Node* phi = loop_head->fast_out(j);
1619    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
1620      Node *newphi = old_new[phi->_idx];
1621      _igvn.hash_delete( phi );
1622      _igvn.hash_delete( newphi );
1623
1624      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
1625      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
1626      phi   ->set_req(LoopNode::LoopBackControl, C->top());
1627    }
1628  }
1629  Node *clone_head = old_new[loop_head->_idx];
1630  _igvn.hash_delete( clone_head );
1631  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
1632  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
1633  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
1634  loop->_head = clone_head;     // New loop header
1635
1636  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
1637  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);
1638
1639  // Kill the clone's backedge
1640  Node *newcle = old_new[loop_end->_idx];
1641  _igvn.hash_delete( newcle );
1642  Node *one = _igvn.intcon(1);
1643  set_ctrl(one, C->root());
1644  newcle->set_req(1, one);
1645  // Force clone into same loop body
1646  uint max = loop->_body.size();
1647  for( uint k = 0; k < max; k++ ) {
1648    Node *old = loop->_body.at(k);
1649    Node *nnn = old_new[old->_idx];
1650    loop->_body.push(nnn);
1651    if (!has_ctrl(old))
1652      set_loop(nnn, loop);
1653  }
1654
1655  loop->record_for_igvn();
1656
1657#ifndef PRODUCT
1658  if (C->do_vector_loop() && (PrintOpto && (VerifyLoopOptimizations || TraceLoopOpts))) {
1659    tty->print("\nnew loop after unroll\n");       loop->dump_head();
1660    for (uint i = 0; i < loop->_body.size(); i++) {
1661      loop->_body.at(i)->dump();
1662    }
1663    if(C->clone_map().is_debug()) {
1664      tty->print("\nCloneMap\n");
1665      Dict* dict = C->clone_map().dict();
1666      DictI i(dict);
1667      tty->print_cr("Dict@%p[%d] = ", dict, dict->Size());
1668      for (int ii = 0; i.test(); ++i, ++ii) {
1669        NodeCloneInfo cl((uint64_t)dict->operator[]((void*)i._key));
1670        tty->print("%d->%d:%d,", (int)(intptr_t)i._key, cl.idx(), cl.gen());
1671        if (ii % 10 == 9) {
1672          tty->print_cr(" ");
1673        }
1674      }
1675      tty->print_cr(" ");
1676    }
1677  }
1678#endif
1679
1680}
1681
1682//------------------------------do_maximally_unroll----------------------------
1683
1684void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
1685  CountedLoopNode *cl = loop->_head->as_CountedLoop();
1686  assert(cl->has_exact_trip_count(), "trip count is not exact");
1687  assert(cl->trip_count() > 0, "");
1688#ifndef PRODUCT
1689  if (TraceLoopOpts) {
1690    tty->print("MaxUnroll  %d ", cl->trip_count());
1691    loop->dump_head();
1692  }
1693#endif
1694
1695  // If loop is tripping an odd number of times, peel odd iteration
1696  if ((cl->trip_count() & 1) == 1) {
1697    do_peeling(loop, old_new);
1698  }
1699
1700  // Now its tripping an even number of times remaining.  Double loop body.
1701  // Do not adjust pre-guards; they are not needed and do not exist.
1702  if (cl->trip_count() > 0) {
1703    assert((cl->trip_count() & 1) == 0, "missed peeling");
1704    do_unroll(loop, old_new, false);
1705  }
1706}
1707
1708void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) {
1709  if (SuperWordReductions == false) return;
1710
1711  CountedLoopNode* loop_head = loop->_head->as_CountedLoop();
1712  if (loop_head->unrolled_count() > 1) {
1713    return;
1714  }
1715
1716  Node* trip_phi = loop_head->phi();
1717  for (DUIterator_Fast imax, i = loop_head->fast_outs(imax); i < imax; i++) {
1718    Node* phi = loop_head->fast_out(i);
1719    if (phi->is_Phi() && phi->outcnt() > 0 && phi != trip_phi) {
1720      // For definitions which are loop inclusive and not tripcounts.
1721      Node* def_node = phi->in(LoopNode::LoopBackControl);
1722
1723      if (def_node != NULL) {
1724        Node* n_ctrl = get_ctrl(def_node);
1725        if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) {
1726          // Now test it to see if it fits the standard pattern for a reduction operator.
1727          int opc = def_node->Opcode();
1728          if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type())) {
1729            if (!def_node->is_reduction()) { // Not marked yet
1730              // To be a reduction, the arithmetic node must have the phi as input and provide a def to it
1731              bool ok = false;
1732              for (unsigned j = 1; j < def_node->req(); j++) {
1733                Node* in = def_node->in(j);
1734                if (in == phi) {
1735                  ok = true;
1736                  break;
1737                }
1738              }
1739
1740              // do nothing if we did not match the initial criteria
1741              if (ok == false) {
1742                continue;
1743              }
1744
1745              // The result of the reduction must not be used in the loop
1746              for (DUIterator_Fast imax, i = def_node->fast_outs(imax); i < imax && ok; i++) {
1747                Node* u = def_node->fast_out(i);
1748                if (!loop->is_member(get_loop(ctrl_or_self(u)))) {
1749                  continue;
1750                }
1751                if (u == phi) {
1752                  continue;
1753                }
1754                ok = false;
1755              }
1756
1757              // iff the uses conform
1758              if (ok) {
1759                def_node->add_flag(Node::Flag_is_reduction);
1760                loop_head->mark_has_reductions();
1761              }
1762            }
1763          }
1764        }
1765      }
1766    }
1767  }
1768}
1769
1770//------------------------------adjust_limit-----------------------------------
1771// Helper function for add_constraint().
1772Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
1773  // Compute "I :: (limit-offset)/scale"
1774  Node *con = new SubINode(rc_limit, offset);
1775  register_new_node(con, pre_ctrl);
1776  Node *X = new DivINode(0, con, scale);
1777  register_new_node(X, pre_ctrl);
1778
1779  // Adjust loop limit
1780  loop_limit = (stride_con > 0)
1781               ? (Node*)(new MinINode(loop_limit, X))
1782               : (Node*)(new MaxINode(loop_limit, X));
1783  register_new_node(loop_limit, pre_ctrl);
1784  return loop_limit;
1785}
1786
1787//------------------------------add_constraint---------------------------------
1788// Constrain the main loop iterations so the conditions:
1789//    low_limit <= scale_con * I + offset  <  upper_limit
1790// always holds true.  That is, either increase the number of iterations in
1791// the pre-loop or the post-loop until the condition holds true in the main
1792// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
1793// stride and scale are constants (offset and limit often are).
1794void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
1795  // For positive stride, the pre-loop limit always uses a MAX function
1796  // and the main loop a MIN function.  For negative stride these are
1797  // reversed.
1798
1799  // Also for positive stride*scale the affine function is increasing, so the
1800  // pre-loop must check for underflow and the post-loop for overflow.
1801  // Negative stride*scale reverses this; pre-loop checks for overflow and
1802  // post-loop for underflow.
1803
1804  Node *scale = _igvn.intcon(scale_con);
1805  set_ctrl(scale, C->root());
1806
1807  if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
1808    // The overflow limit: scale*I+offset < upper_limit
1809    // For main-loop compute
1810    //   ( if (scale > 0) /* and stride > 0 */
1811    //       I < (upper_limit-offset)/scale
1812    //     else /* scale < 0 and stride < 0 */
1813    //       I > (upper_limit-offset)/scale
1814    //   )
1815    //
1816    // (upper_limit-offset) may overflow or underflow.
1817    // But it is fine since main loop will either have
1818    // less iterations or will be skipped in such case.
1819    *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
1820
1821    // The underflow limit: low_limit <= scale*I+offset.
1822    // For pre-loop compute
1823    //   NOT(scale*I+offset >= low_limit)
1824    //   scale*I+offset < low_limit
1825    //   ( if (scale > 0) /* and stride > 0 */
1826    //       I < (low_limit-offset)/scale
1827    //     else /* scale < 0 and stride < 0 */
1828    //       I > (low_limit-offset)/scale
1829    //   )
1830
1831    if (low_limit->get_int() == -max_jint) {
1832      // We need this guard when scale*pre_limit+offset >= limit
1833      // due to underflow. So we need execute pre-loop until
1834      // scale*I+offset >= min_int. But (min_int-offset) will
1835      // underflow when offset > 0 and X will be > original_limit
1836      // when stride > 0. To avoid it we replace positive offset with 0.
1837      //
1838      // Also (min_int+1 == -max_int) is used instead of min_int here
1839      // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1840      Node* shift = _igvn.intcon(31);
1841      set_ctrl(shift, C->root());
1842      Node* sign = new RShiftINode(offset, shift);
1843      register_new_node(sign, pre_ctrl);
1844      offset = new AndINode(offset, sign);
1845      register_new_node(offset, pre_ctrl);
1846    } else {
1847      assert(low_limit->get_int() == 0, "wrong low limit for range check");
1848      // The only problem we have here when offset == min_int
1849      // since (0-min_int) == min_int. It may be fine for stride > 0
1850      // but for stride < 0 X will be < original_limit. To avoid it
1851      // max(pre_limit, original_limit) is used in do_range_check().
1852    }
1853    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1854    *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
1855
1856  } else { // stride_con*scale_con < 0
1857    // For negative stride*scale pre-loop checks for overflow and
1858    // post-loop for underflow.
1859    //
1860    // The overflow limit: scale*I+offset < upper_limit
1861    // For pre-loop compute
1862    //   NOT(scale*I+offset < upper_limit)
1863    //   scale*I+offset >= upper_limit
1864    //   scale*I+offset+1 > upper_limit
1865    //   ( if (scale < 0) /* and stride > 0 */
1866    //       I < (upper_limit-(offset+1))/scale
1867    //     else /* scale > 0 and stride < 0 */
1868    //       I > (upper_limit-(offset+1))/scale
1869    //   )
1870    //
1871    // (upper_limit-offset-1) may underflow or overflow.
1872    // To avoid it min(pre_limit, original_limit) is used
1873    // in do_range_check() for stride > 0 and max() for < 0.
1874    Node *one  = _igvn.intcon(1);
1875    set_ctrl(one, C->root());
1876
1877    Node *plus_one = new AddINode(offset, one);
1878    register_new_node( plus_one, pre_ctrl );
1879    // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1880    *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
1881
1882    if (low_limit->get_int() == -max_jint) {
1883      // We need this guard when scale*main_limit+offset >= limit
1884      // due to underflow. So we need execute main-loop while
1885      // scale*I+offset+1 > min_int. But (min_int-offset-1) will
1886      // underflow when (offset+1) > 0 and X will be < main_limit
1887      // when scale < 0 (and stride > 0). To avoid it we replace
1888      // positive (offset+1) with 0.
1889      //
1890      // Also (min_int+1 == -max_int) is used instead of min_int here
1891      // to avoid problem with scale == -1 (min_int/(-1) == min_int).
1892      Node* shift = _igvn.intcon(31);
1893      set_ctrl(shift, C->root());
1894      Node* sign = new RShiftINode(plus_one, shift);
1895      register_new_node(sign, pre_ctrl);
1896      plus_one = new AndINode(plus_one, sign);
1897      register_new_node(plus_one, pre_ctrl);
1898    } else {
1899      assert(low_limit->get_int() == 0, "wrong low limit for range check");
1900      // The only problem we have here when offset == max_int
1901      // since (max_int+1) == min_int and (0-min_int) == min_int.
1902      // But it is fine since main loop will either have
1903      // less iterations or will be skipped in such case.
1904    }
1905    // The underflow limit: low_limit <= scale*I+offset.
1906    // For main-loop compute
1907    //   scale*I+offset+1 > low_limit
1908    //   ( if (scale < 0) /* and stride > 0 */
1909    //       I < (low_limit-(offset+1))/scale
1910    //     else /* scale > 0 and stride < 0 */
1911    //       I > (low_limit-(offset+1))/scale
1912    //   )
1913
1914    *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
1915  }
1916}
1917
1918
1919//------------------------------is_scaled_iv---------------------------------
1920// Return true if exp is a constant times an induction var
1921bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
1922  if (exp == iv) {
1923    if (p_scale != NULL) {
1924      *p_scale = 1;
1925    }
1926    return true;
1927  }
1928  int opc = exp->Opcode();
1929  if (opc == Op_MulI) {
1930    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1931      if (p_scale != NULL) {
1932        *p_scale = exp->in(2)->get_int();
1933      }
1934      return true;
1935    }
1936    if (exp->in(2) == iv && exp->in(1)->is_Con()) {
1937      if (p_scale != NULL) {
1938        *p_scale = exp->in(1)->get_int();
1939      }
1940      return true;
1941    }
1942  } else if (opc == Op_LShiftI) {
1943    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1944      if (p_scale != NULL) {
1945        *p_scale = 1 << exp->in(2)->get_int();
1946      }
1947      return true;
1948    }
1949  }
1950  return false;
1951}
1952
1953//-----------------------------is_scaled_iv_plus_offset------------------------------
1954// Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
1955bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
1956  if (is_scaled_iv(exp, iv, p_scale)) {
1957    if (p_offset != NULL) {
1958      Node *zero = _igvn.intcon(0);
1959      set_ctrl(zero, C->root());
1960      *p_offset = zero;
1961    }
1962    return true;
1963  }
1964  int opc = exp->Opcode();
1965  if (opc == Op_AddI) {
1966    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1967      if (p_offset != NULL) {
1968        *p_offset = exp->in(2);
1969      }
1970      return true;
1971    }
1972    if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1973      if (p_offset != NULL) {
1974        *p_offset = exp->in(1);
1975      }
1976      return true;
1977    }
1978    if (exp->in(2)->is_Con()) {
1979      Node* offset2 = NULL;
1980      if (depth < 2 &&
1981          is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
1982                                   p_offset != NULL ? &offset2 : NULL, depth+1)) {
1983        if (p_offset != NULL) {
1984          Node *ctrl_off2 = get_ctrl(offset2);
1985          Node* offset = new AddINode(offset2, exp->in(2));
1986          register_new_node(offset, ctrl_off2);
1987          *p_offset = offset;
1988        }
1989        return true;
1990      }
1991    }
1992  } else if (opc == Op_SubI) {
1993    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1994      if (p_offset != NULL) {
1995        Node *zero = _igvn.intcon(0);
1996        set_ctrl(zero, C->root());
1997        Node *ctrl_off = get_ctrl(exp->in(2));
1998        Node* offset = new SubINode(zero, exp->in(2));
1999        register_new_node(offset, ctrl_off);
2000        *p_offset = offset;
2001      }
2002      return true;
2003    }
2004    if (is_scaled_iv(exp->in(2), iv, p_scale)) {
2005      if (p_offset != NULL) {
2006        *p_scale *= -1;
2007        *p_offset = exp->in(1);
2008      }
2009      return true;
2010    }
2011  }
2012  return false;
2013}
2014
2015//------------------------------do_range_check---------------------------------
2016// Eliminate range-checks and other trip-counter vs loop-invariant tests.
2017int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
2018#ifndef PRODUCT
2019  if (PrintOpto && VerifyLoopOptimizations) {
2020    tty->print("Range Check Elimination ");
2021    loop->dump_head();
2022  } else if (TraceLoopOpts) {
2023    tty->print("RangeCheck   ");
2024    loop->dump_head();
2025  }
2026#endif
2027  assert(RangeCheckElimination, "");
2028  CountedLoopNode *cl = loop->_head->as_CountedLoop();
2029  // If we fail before trying to eliminate range checks, set multiversion state
2030  int closed_range_checks = 1;
2031
2032  // protect against stride not being a constant
2033  if (!cl->stride_is_con())
2034    return closed_range_checks;
2035
2036  // Find the trip counter; we are iteration splitting based on it
2037  Node *trip_counter = cl->phi();
2038  // Find the main loop limit; we will trim it's iterations
2039  // to not ever trip end tests
2040  Node *main_limit = cl->limit();
2041
2042  // Check graph shape. Cannot optimize a loop if zero-trip
2043  // Opaque1 node is optimized away and then another round
2044  // of loop opts attempted.
2045  if (!is_canonical_loop_entry(cl)) {
2046    return closed_range_checks;
2047  }
2048
2049  // Need to find the main-loop zero-trip guard
2050  Node *ctrl  = cl->in(LoopNode::EntryControl);
2051  Node *iffm = ctrl->in(0);
2052  Node *opqzm = iffm->in(1)->in(1)->in(2);
2053  assert(opqzm->in(1) == main_limit, "do not understand situation");
2054
2055  // Find the pre-loop limit; we will expand its iterations to
2056  // not ever trip low tests.
2057  Node *p_f = iffm->in(0);
2058  // pre loop may have been optimized out
2059  if (p_f->Opcode() != Op_IfFalse) {
2060    return closed_range_checks;
2061  }
2062  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
2063  assert(pre_end->loopnode()->is_pre_loop(), "");
2064  Node *pre_opaq1 = pre_end->limit();
2065  // Occasionally it's possible for a pre-loop Opaque1 node to be
2066  // optimized away and then another round of loop opts attempted.
2067  // We can not optimize this particular loop in that case.
2068  if (pre_opaq1->Opcode() != Op_Opaque1)
2069    return closed_range_checks;
2070  Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
2071  Node *pre_limit = pre_opaq->in(1);
2072
2073  // Where do we put new limit calculations
2074  Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
2075
2076  // Ensure the original loop limit is available from the
2077  // pre-loop Opaque1 node.
2078  Node *orig_limit = pre_opaq->original_loop_limit();
2079  if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
2080    return closed_range_checks;
2081
2082  // Must know if its a count-up or count-down loop
2083
2084  int stride_con = cl->stride_con();
2085  Node *zero = _igvn.intcon(0);
2086  Node *one  = _igvn.intcon(1);
2087  // Use symmetrical int range [-max_jint,max_jint]
2088  Node *mini = _igvn.intcon(-max_jint);
2089  set_ctrl(zero, C->root());
2090  set_ctrl(one,  C->root());
2091  set_ctrl(mini, C->root());
2092
2093  // Range checks that do not dominate the loop backedge (ie.
2094  // conditionally executed) can lengthen the pre loop limit beyond
2095  // the original loop limit. To prevent this, the pre limit is
2096  // (for stride > 0) MINed with the original loop limit (MAXed
2097  // stride < 0) when some range_check (rc) is conditionally
2098  // executed.
2099  bool conditional_rc = false;
2100
2101  // Count number of range checks and reduce by load range limits, if zero,
2102  // the loop is in canonical form to multiversion.
2103  closed_range_checks = 0;
2104
2105  // Check loop body for tests of trip-counter plus loop-invariant vs loop-variant.
2106  for( uint i = 0; i < loop->_body.size(); i++ ) {
2107    Node *iff = loop->_body[i];
2108    if (iff->Opcode() == Op_If ||
2109        iff->Opcode() == Op_RangeCheck) { // Test?
2110      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
2111      // we need loop unswitching instead of iteration splitting.
2112      closed_range_checks++;
2113      Node *exit = loop->is_loop_exit(iff);
2114      if( !exit ) continue;
2115      int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
2116
2117      // Get boolean condition to test
2118      Node *i1 = iff->in(1);
2119      if( !i1->is_Bool() ) continue;
2120      BoolNode *bol = i1->as_Bool();
2121      BoolTest b_test = bol->_test;
2122      // Flip sense of test if exit condition is flipped
2123      if( flip )
2124        b_test = b_test.negate();
2125
2126      // Get compare
2127      Node *cmp = bol->in(1);
2128
2129      // Look for trip_counter + offset vs limit
2130      Node *rc_exp = cmp->in(1);
2131      Node *limit  = cmp->in(2);
2132      jint scale_con= 1;        // Assume trip counter not scaled
2133
2134      Node *limit_c = get_ctrl(limit);
2135      if( loop->is_member(get_loop(limit_c) ) ) {
2136        // Compare might have operands swapped; commute them
2137        b_test = b_test.commute();
2138        rc_exp = cmp->in(2);
2139        limit  = cmp->in(1);
2140        limit_c = get_ctrl(limit);
2141        if( loop->is_member(get_loop(limit_c) ) )
2142          continue;             // Both inputs are loop varying; cannot RCE
2143      }
2144      // Here we know 'limit' is loop invariant
2145
2146      // 'limit' maybe pinned below the zero trip test (probably from a
2147      // previous round of rce), in which case, it can't be used in the
2148      // zero trip test expression which must occur before the zero test's if.
2149      if( limit_c == ctrl ) {
2150        continue;  // Don't rce this check but continue looking for other candidates.
2151      }
2152
2153      // Check for scaled induction variable plus an offset
2154      Node *offset = NULL;
2155
2156      if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
2157        continue;
2158      }
2159
2160      Node *offset_c = get_ctrl(offset);
2161      if( loop->is_member( get_loop(offset_c) ) )
2162        continue;               // Offset is not really loop invariant
2163      // Here we know 'offset' is loop invariant.
2164
2165      // As above for the 'limit', the 'offset' maybe pinned below the
2166      // zero trip test.
2167      if( offset_c == ctrl ) {
2168        continue; // Don't rce this check but continue looking for other candidates.
2169      }
2170#ifdef ASSERT
2171      if (TraceRangeLimitCheck) {
2172        tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
2173        bol->dump(2);
2174      }
2175#endif
2176      // At this point we have the expression as:
2177      //   scale_con * trip_counter + offset :: limit
2178      // where scale_con, offset and limit are loop invariant.  Trip_counter
2179      // monotonically increases by stride_con, a constant.  Both (or either)
2180      // stride_con and scale_con can be negative which will flip about the
2181      // sense of the test.
2182
2183      // Adjust pre and main loop limits to guard the correct iteration set
2184      if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
2185        if( b_test._test == BoolTest::lt ) { // Range checks always use lt
2186          // The underflow and overflow limits: 0 <= scale*I+offset < limit
2187          add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
2188          // (0-offset)/scale could be outside of loop iterations range.
2189          conditional_rc = true;
2190        } else {
2191          if (PrintOpto) {
2192            tty->print_cr("missed RCE opportunity");
2193          }
2194          continue;             // In release mode, ignore it
2195        }
2196      } else {                  // Otherwise work on normal compares
2197        switch( b_test._test ) {
2198        case BoolTest::gt:
2199          // Fall into GE case
2200        case BoolTest::ge:
2201          // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
2202          scale_con = -scale_con;
2203          offset = new SubINode( zero, offset );
2204          register_new_node( offset, pre_ctrl );
2205          limit  = new SubINode( zero, limit );
2206          register_new_node( limit, pre_ctrl );
2207          // Fall into LE case
2208        case BoolTest::le:
2209          if (b_test._test != BoolTest::gt) {
2210            // Convert X <= Y to X < Y+1
2211            limit = new AddINode( limit, one );
2212            register_new_node( limit, pre_ctrl );
2213          }
2214          // Fall into LT case
2215        case BoolTest::lt:
2216          // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
2217          // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
2218          // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
2219          add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
2220          // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range.
2221          // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could
2222          // still be outside of loop range.
2223          conditional_rc = true;
2224          break;
2225        default:
2226          if (PrintOpto) {
2227            tty->print_cr("missed RCE opportunity");
2228          }
2229          continue;             // Unhandled case
2230        }
2231      }
2232
2233      // Kill the eliminated test
2234      C->set_major_progress();
2235      Node *kill_con = _igvn.intcon( 1-flip );
2236      set_ctrl(kill_con, C->root());
2237      _igvn.replace_input_of(iff, 1, kill_con);
2238      // Find surviving projection
2239      assert(iff->is_If(), "");
2240      ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
2241      // Find loads off the surviving projection; remove their control edge
2242      for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
2243        Node* cd = dp->fast_out(i); // Control-dependent node
2244        if (cd->is_Load() && cd->depends_only_on_test()) {   // Loads can now float around in the loop
2245          // Allow the load to float around in the loop, or before it
2246          // but NOT before the pre-loop.
2247          _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
2248          --i;
2249          --imax;
2250        }
2251      }
2252      if (limit->Opcode() == Op_LoadRange) {
2253        closed_range_checks--;
2254      }
2255
2256    } // End of is IF
2257
2258  }
2259
2260  // Update loop limits
2261  if (conditional_rc) {
2262    pre_limit = (stride_con > 0) ? (Node*)new MinINode(pre_limit, orig_limit)
2263                                 : (Node*)new MaxINode(pre_limit, orig_limit);
2264    register_new_node(pre_limit, pre_ctrl);
2265  }
2266  _igvn.replace_input_of(pre_opaq, 1, pre_limit);
2267
2268  // Note:: we are making the main loop limit no longer precise;
2269  // need to round up based on stride.
2270  cl->set_nonexact_trip_count();
2271  Node *main_cle = cl->loopexit();
2272  Node *main_bol = main_cle->in(1);
2273  // Hacking loop bounds; need private copies of exit test
2274  if( main_bol->outcnt() > 1 ) {// BoolNode shared?
2275    main_bol = main_bol->clone();// Clone a private BoolNode
2276    register_new_node( main_bol, main_cle->in(0) );
2277    _igvn.replace_input_of(main_cle, 1, main_bol);
2278  }
2279  Node *main_cmp = main_bol->in(1);
2280  if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
2281    main_cmp = main_cmp->clone();// Clone a private CmpNode
2282    register_new_node( main_cmp, main_cle->in(0) );
2283    _igvn.replace_input_of(main_bol, 1, main_cmp);
2284  }
2285  // Hack the now-private loop bounds
2286  _igvn.replace_input_of(main_cmp, 2, main_limit);
2287  // The OpaqueNode is unshared by design
2288  assert( opqzm->outcnt() == 1, "cannot hack shared node" );
2289  _igvn.replace_input_of(opqzm, 1, main_limit);
2290
2291  return closed_range_checks;
2292}
2293
2294//------------------------------has_range_checks-------------------------------
2295// Check to see if RCE cleaned the current loop of range-checks.
2296void PhaseIdealLoop::has_range_checks(IdealLoopTree *loop) {
2297  assert(RangeCheckElimination, "");
2298
2299  // skip if not a counted loop
2300  if (!loop->is_counted()) return;
2301
2302  CountedLoopNode *cl = loop->_head->as_CountedLoop();
2303
2304  // skip this loop if it is already checked
2305  if (cl->has_been_range_checked()) return;
2306
2307  // Now check for existence of range checks
2308  for (uint i = 0; i < loop->_body.size(); i++) {
2309    Node *iff = loop->_body[i];
2310    int iff_opc = iff->Opcode();
2311    if (iff_opc == Op_If || iff_opc == Op_RangeCheck) {
2312      cl->mark_has_range_checks();
2313      break;
2314    }
2315  }
2316  cl->set_has_been_range_checked();
2317}
2318
2319//-------------------------multi_version_post_loops----------------------------
2320// Check the range checks that remain, if simple, use the bounds to guard
2321// which version to a post loop we execute, one with range checks or one without
2322bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoopTree *legacy_loop) {
2323  bool multi_version_succeeded = false;
2324  assert(RangeCheckElimination, "");
2325  CountedLoopNode *legacy_cl = legacy_loop->_head->as_CountedLoop();
2326  assert(legacy_cl->is_post_loop(), "");
2327
2328  // Check for existence of range checks using the unique instance to make a guard with
2329  Unique_Node_List worklist;
2330  for (uint i = 0; i < legacy_loop->_body.size(); i++) {
2331    Node *iff = legacy_loop->_body[i];
2332    int iff_opc = iff->Opcode();
2333    if (iff_opc == Op_If || iff_opc == Op_RangeCheck) {
2334      worklist.push(iff);
2335    }
2336  }
2337
2338  // Find RCE'd post loop so that we can stage its guard.
2339  if (!is_canonical_loop_entry(legacy_cl)) return multi_version_succeeded;
2340  Node* ctrl = legacy_cl->in(LoopNode::EntryControl);
2341  Node* iffm = ctrl->in(0);
2342
2343  // Now we test that both the post loops are connected
2344  Node* post_loop_region = iffm->in(0);
2345  if (post_loop_region == NULL) return multi_version_succeeded;
2346  if (!post_loop_region->is_Region()) return multi_version_succeeded;
2347  Node* covering_region = post_loop_region->in(RegionNode::Control+1);
2348  if (covering_region == NULL) return multi_version_succeeded;
2349  if (!covering_region->is_Region()) return multi_version_succeeded;
2350  Node* p_f = covering_region->in(RegionNode::Control);
2351  if (p_f == NULL) return multi_version_succeeded;
2352  if (!p_f->is_IfFalse()) return multi_version_succeeded;
2353  if (!p_f->in(0)->is_CountedLoopEnd()) return multi_version_succeeded;
2354  CountedLoopEndNode* rce_loop_end = p_f->in(0)->as_CountedLoopEnd();
2355  if (rce_loop_end == NULL) return multi_version_succeeded;
2356  CountedLoopNode* rce_cl = rce_loop_end->loopnode();
2357  if (rce_cl == NULL || !rce_cl->is_post_loop()) return multi_version_succeeded;
2358  CountedLoopNode *known_rce_cl = rce_loop->_head->as_CountedLoop();
2359  if (rce_cl != known_rce_cl) return multi_version_succeeded;
2360
2361  // Then we fetch the cover entry test
2362  ctrl = rce_cl->in(LoopNode::EntryControl);
2363  if (!ctrl->is_IfTrue() && !ctrl->is_IfFalse()) return multi_version_succeeded;
2364
2365#ifndef PRODUCT
2366  if (TraceLoopOpts) {
2367    tty->print("PostMultiVersion\n");
2368    rce_loop->dump_head();
2369    legacy_loop->dump_head();
2370  }
2371#endif
2372
2373  // Now fetch the limit we want to compare against
2374  Node *limit = rce_cl->limit();
2375  bool first_time = true;
2376
2377  // If we got this far, we identified the post loop which has been RCE'd and
2378  // we have a work list.  Now we will try to transform the if guard to cause
2379  // the loop pair to be multi version executed with the determination left to runtime
2380  // or the optimizer if full information is known about the given arrays at compile time.
2381  Node *last_min = NULL;
2382  multi_version_succeeded = true;
2383  while (worklist.size()) {
2384    Node* rc_iffm = worklist.pop();
2385    if (rc_iffm->is_If()) {
2386      Node *rc_bolzm = rc_iffm->in(1);
2387      if (rc_bolzm->is_Bool()) {
2388        Node *rc_cmpzm = rc_bolzm->in(1);
2389        if (rc_cmpzm->is_Cmp()) {
2390          Node *rc_left = rc_cmpzm->in(2);
2391          if (rc_left->Opcode() != Op_LoadRange) {
2392            multi_version_succeeded = false;
2393            break;
2394          }
2395          if (first_time) {
2396            last_min = rc_left;
2397            first_time = false;
2398          } else {
2399            Node *cur_min = new MinINode(last_min, rc_left);
2400            last_min = cur_min;
2401            _igvn.register_new_node_with_optimizer(last_min);
2402          }
2403        }
2404      }
2405    }
2406  }
2407
2408  // All we have to do is update the limit of the rce loop
2409  // with the min of our expression and the current limit.
2410  // We will use this expression to replace the current limit.
2411  if (last_min && multi_version_succeeded) {
2412    Node *cur_min = new MinINode(last_min, limit);
2413    _igvn.register_new_node_with_optimizer(cur_min);
2414    Node *cmp_node = rce_loop_end->cmp_node();
2415    _igvn.replace_input_of(cmp_node, 2, cur_min);
2416    set_idom(cmp_node, cur_min, dom_depth(ctrl));
2417    set_ctrl(cur_min, ctrl);
2418    set_loop(cur_min, rce_loop->_parent);
2419
2420    legacy_cl->mark_is_multiversioned();
2421    rce_cl->mark_is_multiversioned();
2422    multi_version_succeeded = true;
2423
2424    C->set_major_progress();
2425  }
2426
2427  return multi_version_succeeded;
2428}
2429
2430//-------------------------poison_rce_post_loop--------------------------------
2431// Causes the rce'd post loop to be optimized away if multiversioning fails
2432void PhaseIdealLoop::poison_rce_post_loop(IdealLoopTree *rce_loop) {
2433  CountedLoopNode *rce_cl = rce_loop->_head->as_CountedLoop();
2434  Node* ctrl = rce_cl->in(LoopNode::EntryControl);
2435  if (ctrl->is_IfTrue() || ctrl->is_IfFalse()) {
2436    Node* iffm = ctrl->in(0);
2437    if (iffm->is_If()) {
2438      Node* cur_bool = iffm->in(1);
2439      if (cur_bool->is_Bool()) {
2440        Node* cur_cmp = cur_bool->in(1);
2441        if (cur_cmp->is_Cmp()) {
2442          BoolTest::mask new_test = BoolTest::gt;
2443          BoolNode *new_bool = new BoolNode(cur_cmp, new_test);
2444          _igvn.replace_node(cur_bool, new_bool);
2445          _igvn._worklist.push(new_bool);
2446          Node* left_op = cur_cmp->in(1);
2447          _igvn.replace_input_of(cur_cmp, 2, left_op);
2448          C->set_major_progress();
2449        }
2450      }
2451    }
2452  }
2453}
2454
2455//------------------------------DCE_loop_body----------------------------------
2456// Remove simplistic dead code from loop body
2457void IdealLoopTree::DCE_loop_body() {
2458  for( uint i = 0; i < _body.size(); i++ )
2459    if( _body.at(i)->outcnt() == 0 )
2460      _body.map( i--, _body.pop() );
2461}
2462
2463
2464//------------------------------adjust_loop_exit_prob--------------------------
2465// Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
2466// Replace with a 1-in-10 exit guess.
2467void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
2468  Node *test = tail();
2469  while( test != _head ) {
2470    uint top = test->Opcode();
2471    if( top == Op_IfTrue || top == Op_IfFalse ) {
2472      int test_con = ((ProjNode*)test)->_con;
2473      assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
2474      IfNode *iff = test->in(0)->as_If();
2475      if( iff->outcnt() == 2 ) {        // Ignore dead tests
2476        Node *bol = iff->in(1);
2477        if( bol && bol->req() > 1 && bol->in(1) &&
2478            ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
2479             (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
2480             (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
2481             (bol->in(1)->Opcode() == Op_CompareAndExchangeB ) ||
2482             (bol->in(1)->Opcode() == Op_CompareAndExchangeS ) ||
2483             (bol->in(1)->Opcode() == Op_CompareAndExchangeI ) ||
2484             (bol->in(1)->Opcode() == Op_CompareAndExchangeL ) ||
2485             (bol->in(1)->Opcode() == Op_CompareAndExchangeP ) ||
2486             (bol->in(1)->Opcode() == Op_CompareAndExchangeN ) ||
2487             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapB ) ||
2488             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapS ) ||
2489             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapI ) ||
2490             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapL ) ||
2491             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapP ) ||
2492             (bol->in(1)->Opcode() == Op_WeakCompareAndSwapN ) ||
2493             (bol->in(1)->Opcode() == Op_CompareAndSwapB ) ||
2494             (bol->in(1)->Opcode() == Op_CompareAndSwapS ) ||
2495             (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
2496             (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
2497             (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
2498             (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
2499          return;               // Allocation loops RARELY take backedge
2500        // Find the OTHER exit path from the IF
2501        Node* ex = iff->proj_out(1-test_con);
2502        float p = iff->_prob;
2503        if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
2504          if( top == Op_IfTrue ) {
2505            if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
2506              iff->_prob = PROB_STATIC_FREQUENT;
2507            }
2508          } else {
2509            if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
2510              iff->_prob = PROB_STATIC_INFREQUENT;
2511            }
2512          }
2513        }
2514      }
2515    }
2516    test = phase->idom(test);
2517  }
2518}
2519
2520#ifdef ASSERT
2521static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
2522  Node *ctrl  = cl->in(LoopNode::EntryControl);
2523  assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
2524  Node *iffm = ctrl->in(0);
2525  assert(iffm->Opcode() == Op_If, "");
2526  Node *p_f = iffm->in(0);
2527  assert(p_f->Opcode() == Op_IfFalse, "");
2528  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
2529  assert(pre_end->loopnode()->is_pre_loop(), "");
2530  return pre_end->loopnode();
2531}
2532#endif
2533
2534// Remove the main and post loops and make the pre loop execute all
2535// iterations. Useful when the pre loop is found empty.
2536void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase) {
2537  CountedLoopEndNode* pre_end = cl->loopexit();
2538  Node* pre_cmp = pre_end->cmp_node();
2539  if (pre_cmp->in(2)->Opcode() != Op_Opaque1) {
2540    // Only safe to remove the main loop if the compiler optimized it
2541    // out based on an unknown number of iterations
2542    return;
2543  }
2544
2545  // Can we find the main loop?
2546  if (_next == NULL) {
2547    return;
2548  }
2549
2550  Node* next_head = _next->_head;
2551  if (!next_head->is_CountedLoop()) {
2552    return;
2553  }
2554
2555  CountedLoopNode* main_head = next_head->as_CountedLoop();
2556  if (!main_head->is_main_loop()) {
2557    return;
2558  }
2559
2560  assert(locate_pre_from_main(main_head) == cl, "bad main loop");
2561  Node* main_iff = main_head->in(LoopNode::EntryControl)->in(0);
2562
2563  // Remove the Opaque1Node of the pre loop and make it execute all iterations
2564  phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
2565  // Remove the Opaque1Node of the main loop so it can be optimized out
2566  Node* main_cmp = main_iff->in(1)->in(1);
2567  assert(main_cmp->in(2)->Opcode() == Op_Opaque1, "main loop has no opaque node?");
2568  phase->_igvn.replace_input_of(main_cmp, 2, main_cmp->in(2)->in(1));
2569}
2570
2571//------------------------------policy_do_remove_empty_loop--------------------
2572// Micro-benchmark spamming.  Policy is to always remove empty loops.
2573// The 'DO' part is to replace the trip counter with the value it will
2574// have on the last iteration.  This will break the loop.
2575bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
2576  // Minimum size must be empty loop
2577  if (_body.size() > EMPTY_LOOP_SIZE)
2578    return false;
2579
2580  if (!_head->is_CountedLoop())
2581    return false;     // Dead loop
2582  CountedLoopNode *cl = _head->as_CountedLoop();
2583  if (!cl->is_valid_counted_loop())
2584    return false; // Malformed loop
2585  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
2586    return false;             // Infinite loop
2587
2588  if (cl->is_pre_loop()) {
2589    // If the loop we are removing is a pre-loop then the main and
2590    // post loop can be removed as well
2591    remove_main_post_loops(cl, phase);
2592  }
2593
2594#ifdef ASSERT
2595  // Ensure only one phi which is the iv.
2596  Node* iv = NULL;
2597  for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
2598    Node* n = cl->fast_out(i);
2599    if (n->Opcode() == Op_Phi) {
2600      assert(iv == NULL, "Too many phis" );
2601      iv = n;
2602    }
2603  }
2604  assert(iv == cl->phi(), "Wrong phi" );
2605#endif
2606
2607  // main and post loops have explicitly created zero trip guard
2608  bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
2609  if (needs_guard) {
2610    // Skip guard if values not overlap.
2611    const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int();
2612    const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int();
2613    int  stride_con = cl->stride_con();
2614    if (stride_con > 0) {
2615      needs_guard = (init_t->_hi >= limit_t->_lo);
2616    } else {
2617      needs_guard = (init_t->_lo <= limit_t->_hi);
2618    }
2619  }
2620  if (needs_guard) {
2621    // Check for an obvious zero trip guard.
2622    Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
2623    if (inctrl->Opcode() == Op_IfTrue || inctrl->Opcode() == Op_IfFalse) {
2624      bool maybe_swapped = (inctrl->Opcode() == Op_IfFalse);
2625      // The test should look like just the backedge of a CountedLoop
2626      Node* iff = inctrl->in(0);
2627      if (iff->is_If()) {
2628        Node* bol = iff->in(1);
2629        if (bol->is_Bool()) {
2630          BoolTest test = bol->as_Bool()->_test;
2631          if (maybe_swapped) {
2632            test._test = test.commute();
2633            test._test = test.negate();
2634          }
2635          if (test._test == cl->loopexit()->test_trip()) {
2636            Node* cmp = bol->in(1);
2637            int init_idx = maybe_swapped ? 2 : 1;
2638            int limit_idx = maybe_swapped ? 1 : 2;
2639            if (cmp->is_Cmp() && cmp->in(init_idx) == cl->init_trip() && cmp->in(limit_idx) == cl->limit()) {
2640              needs_guard = false;
2641            }
2642          }
2643        }
2644      }
2645    }
2646  }
2647
2648#ifndef PRODUCT
2649  if (PrintOpto) {
2650    tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : "");
2651    this->dump_head();
2652  } else if (TraceLoopOpts) {
2653    tty->print("Empty with%s zero trip guard   ", needs_guard ? "out" : "");
2654    this->dump_head();
2655  }
2656#endif
2657
2658  if (needs_guard) {
2659    // Peel the loop to ensure there's a zero trip guard
2660    Node_List old_new;
2661    phase->do_peeling(this, old_new);
2662  }
2663
2664  // Replace the phi at loop head with the final value of the last
2665  // iteration.  Then the CountedLoopEnd will collapse (backedge never
2666  // taken) and all loop-invariant uses of the exit values will be correct.
2667  Node *phi = cl->phi();
2668  Node *exact_limit = phase->exact_limit(this);
2669  if (exact_limit != cl->limit()) {
2670    // We also need to replace the original limit to collapse loop exit.
2671    Node* cmp = cl->loopexit()->cmp_node();
2672    assert(cl->limit() == cmp->in(2), "sanity");
2673    phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
2674    phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
2675  }
2676  // Note: the final value after increment should not overflow since
2677  // counted loop has limit check predicate.
2678  Node *final = new SubINode( exact_limit, cl->stride() );
2679  phase->register_new_node(final,cl->in(LoopNode::EntryControl));
2680  phase->_igvn.replace_node(phi,final);
2681  phase->C->set_major_progress();
2682  return true;
2683}
2684
2685//------------------------------policy_do_one_iteration_loop-------------------
2686// Convert one iteration loop into normal code.
2687bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
2688  if (!_head->as_Loop()->is_valid_counted_loop())
2689    return false; // Only for counted loop
2690
2691  CountedLoopNode *cl = _head->as_CountedLoop();
2692  if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
2693    return false;
2694  }
2695
2696#ifndef PRODUCT
2697  if(TraceLoopOpts) {
2698    tty->print("OneIteration ");
2699    this->dump_head();
2700  }
2701#endif
2702
2703  Node *init_n = cl->init_trip();
2704#ifdef ASSERT
2705  // Loop boundaries should be constant since trip count is exact.
2706  assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration");
2707#endif
2708  // Replace the phi at loop head with the value of the init_trip.
2709  // Then the CountedLoopEnd will collapse (backedge will not be taken)
2710  // and all loop-invariant uses of the exit values will be correct.
2711  phase->_igvn.replace_node(cl->phi(), cl->init_trip());
2712  phase->C->set_major_progress();
2713  return true;
2714}
2715
2716//=============================================================================
2717//------------------------------iteration_split_impl---------------------------
2718bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
2719  // Compute loop trip count if possible.
2720  compute_trip_count(phase);
2721
2722  // Convert one iteration loop into normal code.
2723  if (policy_do_one_iteration_loop(phase))
2724    return true;
2725
2726  // Check and remove empty loops (spam micro-benchmarks)
2727  if (policy_do_remove_empty_loop(phase))
2728    return true;  // Here we removed an empty loop
2729
2730  bool should_peel = policy_peeling(phase); // Should we peel?
2731
2732  bool should_unswitch = policy_unswitching(phase);
2733
2734  // Non-counted loops may be peeled; exactly 1 iteration is peeled.
2735  // This removes loop-invariant tests (usually null checks).
2736  if (!_head->is_CountedLoop()) { // Non-counted loop
2737    if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
2738      // Partial peel succeeded so terminate this round of loop opts
2739      return false;
2740    }
2741    if (should_peel) {            // Should we peel?
2742      if (PrintOpto) { tty->print_cr("should_peel"); }
2743      phase->do_peeling(this,old_new);
2744    } else if (should_unswitch) {
2745      phase->do_unswitching(this, old_new);
2746    }
2747    return true;
2748  }
2749  CountedLoopNode *cl = _head->as_CountedLoop();
2750
2751  if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops
2752
2753  // Do nothing special to pre- and post- loops
2754  if (cl->is_pre_loop() || cl->is_post_loop()) return true;
2755
2756  // Compute loop trip count from profile data
2757  compute_profile_trip_cnt(phase);
2758
2759  // Before attempting fancy unrolling, RCE or alignment, see if we want
2760  // to completely unroll this loop or do loop unswitching.
2761  if (cl->is_normal_loop()) {
2762    if (should_unswitch) {
2763      phase->do_unswitching(this, old_new);
2764      return true;
2765    }
2766    bool should_maximally_unroll =  policy_maximally_unroll(phase);
2767    if (should_maximally_unroll) {
2768      // Here we did some unrolling and peeling.  Eventually we will
2769      // completely unroll this loop and it will no longer be a loop.
2770      phase->do_maximally_unroll(this,old_new);
2771      return true;
2772    }
2773  }
2774
2775  // Skip next optimizations if running low on nodes. Note that
2776  // policy_unswitching and policy_maximally_unroll have this check.
2777  int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes();
2778  if ((int)(2 * _body.size()) > nodes_left) {
2779    return true;
2780  }
2781
2782  // Counted loops may be peeled, may need some iterations run up
2783  // front for RCE, and may want to align loop refs to a cache
2784  // line.  Thus we clone a full loop up front whose trip count is
2785  // at least 1 (if peeling), but may be several more.
2786
2787  // The main loop will start cache-line aligned with at least 1
2788  // iteration of the unrolled body (zero-trip test required) and
2789  // will have some range checks removed.
2790
2791  // A post-loop will finish any odd iterations (leftover after
2792  // unrolling), plus any needed for RCE purposes.
2793
2794  bool should_unroll = policy_unroll(phase);
2795
2796  bool should_rce = policy_range_check(phase);
2797
2798  bool should_align = policy_align(phase);
2799
2800  // If not RCE'ing (iteration splitting) or Aligning, then we do not
2801  // need a pre-loop.  We may still need to peel an initial iteration but
2802  // we will not be needing an unknown number of pre-iterations.
2803  //
2804  // Basically, if may_rce_align reports FALSE first time through,
2805  // we will not be able to later do RCE or Aligning on this loop.
2806  bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
2807
2808  // If we have any of these conditions (RCE, alignment, unrolling) met, then
2809  // we switch to the pre-/main-/post-loop model.  This model also covers
2810  // peeling.
2811  if (should_rce || should_align || should_unroll) {
2812    if (cl->is_normal_loop())  // Convert to 'pre/main/post' loops
2813      phase->insert_pre_post_loops(this,old_new, !may_rce_align);
2814
2815    // Adjust the pre- and main-loop limits to let the pre and post loops run
2816    // with full checks, but the main-loop with no checks.  Remove said
2817    // checks from the main body.
2818    if (should_rce) {
2819      if (phase->do_range_check(this, old_new) != 0) {
2820        cl->mark_has_range_checks();
2821      }
2822    } else if (PostLoopMultiversioning) {
2823      phase->has_range_checks(this);
2824    }
2825
2826    if (should_unroll && !should_peel && PostLoopMultiversioning) {
2827      // Try to setup multiversioning on main loops before they are unrolled
2828      if (cl->is_main_loop() && (cl->unrolled_count() == 1)) {
2829        phase->insert_scalar_rced_post_loop(this, old_new);
2830      }
2831    }
2832
2833    // Double loop body for unrolling.  Adjust the minimum-trip test (will do
2834    // twice as many iterations as before) and the main body limit (only do
2835    // an even number of trips).  If we are peeling, we might enable some RCE
2836    // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
2837    // peeling.
2838    if (should_unroll && !should_peel) {
2839      if (SuperWordLoopUnrollAnalysis) {
2840        phase->insert_vector_post_loop(this, old_new);
2841      }
2842      phase->do_unroll(this, old_new, true);
2843    }
2844
2845    // Adjust the pre-loop limits to align the main body
2846    // iterations.
2847    if (should_align)
2848      Unimplemented();
2849
2850  } else {                      // Else we have an unchanged counted loop
2851    if (should_peel)           // Might want to peel but do nothing else
2852      phase->do_peeling(this,old_new);
2853  }
2854  return true;
2855}
2856
2857
2858//=============================================================================
2859//------------------------------iteration_split--------------------------------
2860bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
2861  // Recursively iteration split nested loops
2862  if (_child && !_child->iteration_split(phase, old_new))
2863    return false;
2864
2865  // Clean out prior deadwood
2866  DCE_loop_body();
2867
2868
2869  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
2870  // Replace with a 1-in-10 exit guess.
2871  if (_parent /*not the root loop*/ &&
2872      !_irreducible &&
2873      // Also ignore the occasional dead backedge
2874      !tail()->is_top()) {
2875    adjust_loop_exit_prob(phase);
2876  }
2877
2878  // Gate unrolling, RCE and peeling efforts.
2879  if (!_child &&                // If not an inner loop, do not split
2880      !_irreducible &&
2881      _allow_optimizations &&
2882      !tail()->is_top()) {     // Also ignore the occasional dead backedge
2883    if (!_has_call) {
2884        if (!iteration_split_impl(phase, old_new)) {
2885          return false;
2886        }
2887    } else if (policy_unswitching(phase)) {
2888      phase->do_unswitching(this, old_new);
2889    }
2890  }
2891
2892  // Minor offset re-organization to remove loop-fallout uses of
2893  // trip counter when there was no major reshaping.
2894  phase->reorg_offsets(this);
2895
2896  if (_next && !_next->iteration_split(phase, old_new))
2897    return false;
2898  return true;
2899}
2900
2901
2902//=============================================================================
2903// Process all the loops in the loop tree and replace any fill
2904// patterns with an intrinsic version.
2905bool PhaseIdealLoop::do_intrinsify_fill() {
2906  bool changed = false;
2907  for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2908    IdealLoopTree* lpt = iter.current();
2909    changed |= intrinsify_fill(lpt);
2910  }
2911  return changed;
2912}
2913
2914
2915// Examine an inner loop looking for a a single store of an invariant
2916// value in a unit stride loop,
2917bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2918                                     Node*& shift, Node*& con) {
2919  const char* msg = NULL;
2920  Node* msg_node = NULL;
2921
2922  store_value = NULL;
2923  con = NULL;
2924  shift = NULL;
2925
2926  // Process the loop looking for stores.  If there are multiple
2927  // stores or extra control flow give at this point.
2928  CountedLoopNode* head = lpt->_head->as_CountedLoop();
2929  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2930    Node* n = lpt->_body.at(i);
2931    if (n->outcnt() == 0) continue; // Ignore dead
2932    if (n->is_Store()) {
2933      if (store != NULL) {
2934        msg = "multiple stores";
2935        break;
2936      }
2937      int opc = n->Opcode();
2938      if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
2939        msg = "oop fills not handled";
2940        break;
2941      }
2942      Node* value = n->in(MemNode::ValueIn);
2943      if (!lpt->is_invariant(value)) {
2944        msg  = "variant store value";
2945      } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2946        msg = "not array address";
2947      }
2948      store = n;
2949      store_value = value;
2950    } else if (n->is_If() && n != head->loopexit()) {
2951      msg = "extra control flow";
2952      msg_node = n;
2953    }
2954  }
2955
2956  if (store == NULL) {
2957    // No store in loop
2958    return false;
2959  }
2960
2961  if (msg == NULL && head->stride_con() != 1) {
2962    // could handle negative strides too
2963    if (head->stride_con() < 0) {
2964      msg = "negative stride";
2965    } else {
2966      msg = "non-unit stride";
2967    }
2968  }
2969
2970  if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2971    msg = "can't handle store address";
2972    msg_node = store->in(MemNode::Address);
2973  }
2974
2975  if (msg == NULL &&
2976      (!store->in(MemNode::Memory)->is_Phi() ||
2977       store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2978    msg = "store memory isn't proper phi";
2979    msg_node = store->in(MemNode::Memory);
2980  }
2981
2982  // Make sure there is an appropriate fill routine
2983  BasicType t = store->as_Mem()->memory_type();
2984  const char* fill_name;
2985  if (msg == NULL &&
2986      StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2987    msg = "unsupported store";
2988    msg_node = store;
2989  }
2990
2991  if (msg != NULL) {
2992#ifndef PRODUCT
2993    if (TraceOptimizeFill) {
2994      tty->print_cr("not fill intrinsic candidate: %s", msg);
2995      if (msg_node != NULL) msg_node->dump();
2996    }
2997#endif
2998    return false;
2999  }
3000
3001  // Make sure the address expression can be handled.  It should be
3002  // head->phi * elsize + con.  head->phi might have a ConvI2L(CastII()).
3003  Node* elements[4];
3004  Node* cast = NULL;
3005  Node* conv = NULL;
3006  bool found_index = false;
3007  int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
3008  for (int e = 0; e < count; e++) {
3009    Node* n = elements[e];
3010    if (n->is_Con() && con == NULL) {
3011      con = n;
3012    } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
3013      Node* value = n->in(1);
3014#ifdef _LP64
3015      if (value->Opcode() == Op_ConvI2L) {
3016        conv = value;
3017        value = value->in(1);
3018      }
3019      if (value->Opcode() == Op_CastII &&
3020          value->as_CastII()->has_range_check()) {
3021        // Skip range check dependent CastII nodes
3022        cast = value;
3023        value = value->in(1);
3024      }
3025#endif
3026      if (value != head->phi()) {
3027        msg = "unhandled shift in address";
3028      } else {
3029        if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) {
3030          msg = "scale doesn't match";
3031        } else {
3032          found_index = true;
3033          shift = n;
3034        }
3035      }
3036    } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
3037      conv = n;
3038      n = n->in(1);
3039      if (n->Opcode() == Op_CastII &&
3040          n->as_CastII()->has_range_check()) {
3041        // Skip range check dependent CastII nodes
3042        cast = n;
3043        n = n->in(1);
3044      }
3045      if (n == head->phi()) {
3046        found_index = true;
3047      } else {
3048        msg = "unhandled input to ConvI2L";
3049      }
3050    } else if (n == head->phi()) {
3051      // no shift, check below for allowed cases
3052      found_index = true;
3053    } else {
3054      msg = "unhandled node in address";
3055      msg_node = n;
3056    }
3057  }
3058
3059  if (count == -1) {
3060    msg = "malformed address expression";
3061    msg_node = store;
3062  }
3063
3064  if (!found_index) {
3065    msg = "missing use of index";
3066  }
3067
3068  // byte sized items won't have a shift
3069  if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
3070    msg = "can't find shift";
3071    msg_node = store;
3072  }
3073
3074  if (msg != NULL) {
3075#ifndef PRODUCT
3076    if (TraceOptimizeFill) {
3077      tty->print_cr("not fill intrinsic: %s", msg);
3078      if (msg_node != NULL) msg_node->dump();
3079    }
3080#endif
3081    return false;
3082  }
3083
3084  // No make sure all the other nodes in the loop can be handled
3085  VectorSet ok(Thread::current()->resource_area());
3086
3087  // store related values are ok
3088  ok.set(store->_idx);
3089  ok.set(store->in(MemNode::Memory)->_idx);
3090
3091  CountedLoopEndNode* loop_exit = head->loopexit();
3092  guarantee(loop_exit != NULL, "no loop exit node");
3093
3094  // Loop structure is ok
3095  ok.set(head->_idx);
3096  ok.set(loop_exit->_idx);
3097  ok.set(head->phi()->_idx);
3098  ok.set(head->incr()->_idx);
3099  ok.set(loop_exit->cmp_node()->_idx);
3100  ok.set(loop_exit->in(1)->_idx);
3101
3102  // Address elements are ok
3103  if (con)   ok.set(con->_idx);
3104  if (shift) ok.set(shift->_idx);
3105  if (cast)  ok.set(cast->_idx);
3106  if (conv)  ok.set(conv->_idx);
3107
3108  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
3109    Node* n = lpt->_body.at(i);
3110    if (n->outcnt() == 0) continue; // Ignore dead
3111    if (ok.test(n->_idx)) continue;
3112    // Backedge projection is ok
3113    if (n->is_IfTrue() && n->in(0) == loop_exit) continue;
3114    if (!n->is_AddP()) {
3115      msg = "unhandled node";
3116      msg_node = n;
3117      break;
3118    }
3119  }
3120
3121  // Make sure no unexpected values are used outside the loop
3122  for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
3123    Node* n = lpt->_body.at(i);
3124    // These values can be replaced with other nodes if they are used
3125    // outside the loop.
3126    if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue;
3127    for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
3128      Node* use = iter.get();
3129      if (!lpt->_body.contains(use)) {
3130        msg = "node is used outside loop";
3131        // lpt->_body.dump();
3132        msg_node = n;
3133        break;
3134      }
3135    }
3136  }
3137
3138#ifdef ASSERT
3139  if (TraceOptimizeFill) {
3140    if (msg != NULL) {
3141      tty->print_cr("no fill intrinsic: %s", msg);
3142      if (msg_node != NULL) msg_node->dump();
3143    } else {
3144      tty->print_cr("fill intrinsic for:");
3145    }
3146    store->dump();
3147    if (Verbose) {
3148      lpt->_body.dump();
3149    }
3150  }
3151#endif
3152
3153  return msg == NULL;
3154}
3155
3156
3157
3158bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
3159  // Only for counted inner loops
3160  if (!lpt->is_counted() || !lpt->is_inner()) {
3161    return false;
3162  }
3163
3164  // Must have constant stride
3165  CountedLoopNode* head = lpt->_head->as_CountedLoop();
3166  if (!head->is_valid_counted_loop() || !head->is_normal_loop()) {
3167    return false;
3168  }
3169
3170  // Check that the body only contains a store of a loop invariant
3171  // value that is indexed by the loop phi.
3172  Node* store = NULL;
3173  Node* store_value = NULL;
3174  Node* shift = NULL;
3175  Node* offset = NULL;
3176  if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
3177    return false;
3178  }
3179
3180  Node* exit = head->loopexit()->proj_out(0);
3181  if (exit == NULL) {
3182    return false;
3183  }
3184
3185#ifndef PRODUCT
3186  if (TraceLoopOpts) {
3187    tty->print("ArrayFill    ");
3188    lpt->dump_head();
3189  }
3190#endif
3191
3192  // Now replace the whole loop body by a call to a fill routine that
3193  // covers the same region as the loop.
3194  Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
3195
3196  // Build an expression for the beginning of the copy region
3197  Node* index = head->init_trip();
3198#ifdef _LP64
3199  index = new ConvI2LNode(index);
3200  _igvn.register_new_node_with_optimizer(index);
3201#endif
3202  if (shift != NULL) {
3203    // byte arrays don't require a shift but others do.
3204    index = new LShiftXNode(index, shift->in(2));
3205    _igvn.register_new_node_with_optimizer(index);
3206  }
3207  index = new AddPNode(base, base, index);
3208  _igvn.register_new_node_with_optimizer(index);
3209  Node* from = new AddPNode(base, index, offset);
3210  _igvn.register_new_node_with_optimizer(from);
3211  // Compute the number of elements to copy
3212  Node* len = new SubINode(head->limit(), head->init_trip());
3213  _igvn.register_new_node_with_optimizer(len);
3214
3215  BasicType t = store->as_Mem()->memory_type();
3216  bool aligned = false;
3217  if (offset != NULL && head->init_trip()->is_Con()) {
3218    int element_size = type2aelembytes(t);
3219    aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
3220  }
3221
3222  // Build a call to the fill routine
3223  const char* fill_name;
3224  address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
3225  assert(fill != NULL, "what?");
3226
3227  // Convert float/double to int/long for fill routines
3228  if (t == T_FLOAT) {
3229    store_value = new MoveF2INode(store_value);
3230    _igvn.register_new_node_with_optimizer(store_value);
3231  } else if (t == T_DOUBLE) {
3232    store_value = new MoveD2LNode(store_value);
3233    _igvn.register_new_node_with_optimizer(store_value);
3234  }
3235
3236  Node* mem_phi = store->in(MemNode::Memory);
3237  Node* result_ctrl;
3238  Node* result_mem;
3239  const TypeFunc* call_type = OptoRuntime::array_fill_Type();
3240  CallLeafNode *call = new CallLeafNoFPNode(call_type, fill,
3241                                            fill_name, TypeAryPtr::get_array_body_type(t));
3242  uint cnt = 0;
3243  call->init_req(TypeFunc::Parms + cnt++, from);
3244  call->init_req(TypeFunc::Parms + cnt++, store_value);
3245#ifdef _LP64
3246  len = new ConvI2LNode(len);
3247  _igvn.register_new_node_with_optimizer(len);
3248#endif
3249  call->init_req(TypeFunc::Parms + cnt++, len);
3250#ifdef _LP64
3251  call->init_req(TypeFunc::Parms + cnt++, C->top());
3252#endif
3253  call->init_req(TypeFunc::Control,   head->init_control());
3254  call->init_req(TypeFunc::I_O,       C->top());       // Does no I/O.
3255  call->init_req(TypeFunc::Memory,    mem_phi->in(LoopNode::EntryControl));
3256  call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
3257  call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
3258  _igvn.register_new_node_with_optimizer(call);
3259  result_ctrl = new ProjNode(call,TypeFunc::Control);
3260  _igvn.register_new_node_with_optimizer(result_ctrl);
3261  result_mem = new ProjNode(call,TypeFunc::Memory);
3262  _igvn.register_new_node_with_optimizer(result_mem);
3263
3264/* Disable following optimization until proper fix (add missing checks).
3265
3266  // If this fill is tightly coupled to an allocation and overwrites
3267  // the whole body, allow it to take over the zeroing.
3268  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
3269  if (alloc != NULL && alloc->is_AllocateArray()) {
3270    Node* length = alloc->as_AllocateArray()->Ideal_length();
3271    if (head->limit() == length &&
3272        head->init_trip() == _igvn.intcon(0)) {
3273      if (TraceOptimizeFill) {
3274        tty->print_cr("Eliminated zeroing in allocation");
3275      }
3276      alloc->maybe_set_complete(&_igvn);
3277    } else {
3278#ifdef ASSERT
3279      if (TraceOptimizeFill) {
3280        tty->print_cr("filling array but bounds don't match");
3281        alloc->dump();
3282        head->init_trip()->dump();
3283        head->limit()->dump();
3284        length->dump();
3285      }
3286#endif
3287    }
3288  }
3289*/
3290
3291  // Redirect the old control and memory edges that are outside the loop.
3292  // Sometimes the memory phi of the head is used as the outgoing
3293  // state of the loop.  It's safe in this case to replace it with the
3294  // result_mem.
3295  _igvn.replace_node(store->in(MemNode::Memory), result_mem);
3296  lazy_replace(exit, result_ctrl);
3297  _igvn.replace_node(store, result_mem);
3298  // Any uses the increment outside of the loop become the loop limit.
3299  _igvn.replace_node(head->incr(), head->limit());
3300
3301  // Disconnect the head from the loop.
3302  for (uint i = 0; i < lpt->_body.size(); i++) {
3303    Node* n = lpt->_body.at(i);
3304    _igvn.replace_node(n, C->top());
3305  }
3306
3307  return true;
3308}
3309