parse2.cpp revision 7890:f83851ae258e
/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType elem_type) {
  const Type* elem = Type::TOP;
  Node* adr = array_addressing(elem_type, 0, &elem);
  if (stopped())  return;     // guaranteed null or range check
  dec_sp(2);                  // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
  push(ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType elem_type) {
  Node* adr = array_addressing(elem_type, 1);
  if (stopped())  return;     // guaranteed null or range check
  Node* val = pop();
  dec_sp(2);                  // Pop array and index
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
  store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type));
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node *idx   = peek(0+vals);   // Get from stack without popping
  Node *ary   = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)   C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass * arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

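      // (Note on the trick: a negative index, reinterpreted as unsigned,
      // becomes a huge value that is never below a non-negative length, so a
      // single unsigned "idx <u len" test covers both idx < 0 and idx >= len.)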
      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
    // Branch to failure if out of bounds
    { BuildCutout unless(this, tst, PROB_MAX);
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
        // This creates "tentative" range checks at this point,
        // which are not guaranteed to throw exceptions.
        // See IfNode::Ideal, is_range_check, adjust_check.
        uncommon_trap(Deoptimization::Reason_range_check,
                      Deoptimization::Action_make_not_entrant,
                      NULL, "range_check");
      } else {
        // If we have already recompiled with the range-check-widening
        // heroic optimization turned off, then we must really be throwing
        // range check exceptions.
        builtin_throw(Deoptimization::Reason_range_check, idx);
      }
    }
  }
  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  Node* ptr = array_element_address(ary, idx, type, sizetype);

  if (result2 != NULL)  *result2 = elemtype;

  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}


// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
  Node   *cmp = _gvn.transform( new CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform( new BoolNode( cmp, mask));
  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
  return iff;
}

// return Region node
Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
  Node *region  = new RegionNode(3); // 2 results
  record_for_igvn(region);
  region->init_req(1, iffalse);
  region->init_req(2, iftrue );
  _gvn.set_type(region, Type::CONTROL);
  region = _gvn.transform(region);
  set_control (region);
  return region;
}


//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    profile_switch_case(prof_table_index);
    merge_new_path(dest_bci_if_true);
  }

  // False branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
  // False branch, use existing map and control()
  profile_switch_case(prof_table_index);
  merge_new_path(dest_bci);
}


extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


// Default value for methodData switch indexing. Must be a negative value to avoid
// conflict with any legal switch index.
#define NullTableIndex -1

class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int _dest;
  int _table_index;             // index into method data table

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  int  table_index() const     { return _table_index; }
  bool is_singleton() const    { return _lo == _hi; }

  void setRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
  }
  bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
      _hi = hi;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, int table_index) {
    setRange(value, value, dest, table_index);
  }
  bool adjoin(jint value, int dest, int table_index) {
    return adjoinRange(value, value, dest, table_index);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d", lo(), dest());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d", hi(), dest());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d", lo(), dest());
    else
      tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
  }
};


//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  Node* lookup = pop();

  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  int lo_index     = iter().get_int_table(1);
  int hi_index     = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
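  // (Room for the len case ranges plus up to two filler ranges that send
  // values below lo_index or above the highest case to the default target.)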
  int rnum = len+2;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    int  table_index = method_data_update() ? j : NullTableIndex;
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < len+2, "not too many ranges");

  // Safepoint in case a backward branch is observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  Node *lookup = pop();         // lookup value
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  int len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    merge(default_dest);
    return;
  }

  // generate decision tree, using trichotomy when possible
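  // (Flatten the (match, dest) pairs into one jint array and qsort it by match
  // value; jint_cmp only reads the first jint of each two-jint record, which
  // is the match key, so the pairs end up sorted in ascending key order.)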
  jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
  {
    for( int j = 0; j < len; j++ ) {
      table[j+j+0] = iter().get_int_table(2+j+j);
      table[j+j+1] = iter().get_dest_table(2+j+j+1);
    }
    qsort( table, len, 2*sizeof(table[0]), jint_cmp );
  }

  int rnum = len*2+1;
  bool makes_backward_branch = false;
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for( int j = 0; j < len; j++ ) {
    jint match_int   = table[j+j+0];
    int  dest        = table[j+j+1];
    int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    int  table_index = method_data_update() ? j : NullTableIndex;
    makes_backward_branch |= (dest <= bci());
    if( match_int != next_lo ) {
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
    }
    if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
      ranges[++rp].set(match_int, dest, table_index);
    }
  }
  jint highest = table[2*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if( highest != max_jint
      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
  }
  assert(rp < rnum, "not too many ranges");

  // Safepoint in case backward branch observed
  if( makes_backward_branch && UseLoopSafepoints )
    add_safepoint();

  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  // Don't make jump table if profiling
  if (method_data_update())  return false;

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".

  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;
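  // (The spans are computed in 64-bit arithmetic because a single end range
  // can cover most of the jint domain and would overflow a 32-bit subtraction.)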

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) lo++;
    if (default_dest == hi->dest()) hi--;
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
    return false;
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp = _gvn.transform( new CmpUNode(key_val, size) );
    Node*   tst = _gvn.transform( new BoolNode(cmp, BoolTest::ge) );
    IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
    jump_if_true_fork(iff, default_dest, NullTableIndex);
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.
#ifdef _LP64
  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
  key_val       = _gvn.transform( new ConvI2LNode(key_val, lkeytype) );
#endif
  // Shift the value by wordsize so we have an index into the table, rather
  // than a switch value
  Node *shiftWord = _gvn.MakeConX(wordSize);
  key_val = _gvn.transform( new MulXNode( key_val, shiftWord));

  // Create the JumpNode
  Node* jtn = _gvn.transform( new JumpNode(control(), key_val, num_cases) );

  // These are the switch destinations hanging off the jumpnode
  int i = 0;
  for (SwitchRange* r = lo; r <= hi; r++) {
    for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
      Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
      {
        PreserveJVMState pjvms(this);
        set_control(input);
        jump_if_always_fork(r->dest(), r->table_index());
      }
    }
  }
  assert(i == num_cases, "miscount of cases");
  stop_and_kill_map();  // no more uses for this JVMS
  return true;
}

//----------------------------jump_switch_ranges-------------------------------
void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
  Block* switch_block = block();

  if (switch_depth == 0) {
    // Do special processing for the top-level call.
    assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
    assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");

    // Decrement pred-numbers for the unique set of nodes.
#ifdef ASSERT
    // Ensure that the block's successors are a (duplicate-free) set.
    int successors_counted = 0;  // block occurrences in [lo..hi]
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);

      // Check that the set of successors is the same in both places.
      int successors_found = 0;
      for (SwitchRange* p = lo; p <= hi; p++) {
        if (p->dest() == target->start())  successors_found++;
      }
      assert(successors_found > 0, "successor must be known");
      successors_counted += successors_found;
    }
    assert(successors_counted == (hi-lo)+1, "no unexpected successors");
#endif

    // Maybe prune the inputs, based on the type of key_val.
    jint min_val = min_jint;
    jint max_val = max_jint;
    const TypeInt* ti = key_val->bottom_type()->isa_int();
    if (ti != NULL) {
      min_val = ti->_lo;
      max_val = ti->_hi;
      assert(min_val <= max_val, "invalid int type");
    }
    while (lo->hi() < min_val)  lo++;
    if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
    while (hi->lo() > max_val)  hi--;
    if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
  }

#ifndef PRODUCT
  if (switch_depth == 0) {
    _max_switch_depth = 0;
    _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
  }
#endif

  assert(lo <= hi, "must be a non-empty set of ranges");
  if (lo == hi) {
    jump_if_always_fork(lo->dest(), lo->table_index());
  } else {
    assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
    assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");

    if (create_jump_tables(key_val, lo, hi)) return;

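    // Binary search over the ranges: pick a pivot near the middle, emit an
    // equality test when the pivot is a singleton (one test settles that
    // case), otherwise a >= test, and recurse on each half.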
    int nr = hi - lo + 1;

    SwitchRange* mid = lo + nr/2;
    // if there is an easy choice, pivot at a singleton:
    if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;

    assert(lo < mid && mid <= hi, "good pivot choice");
    assert(nr != 2 || mid == hi,   "should pick higher of 2");
    assert(nr != 3 || mid == hi-1, "should pick middle of 3");

    Node *test_val = _gvn.intcon(mid->lo());

    if (mid->is_singleton()) {
      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());

      // Special Case:  If there are exactly three ranges, and the high
      // and low range each go to the same place, omit the "gt" test,
      // since it will not discriminate anything.
      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
      if (eq_test_only) {
        assert(mid == hi-1, "");
      }

      // if there is a higher range, test for it and process it:
      if (mid < hi && !eq_test_only) {
        // two comparisons of same values--should enable 1 test for 2 branches
        // Use BoolTest::le instead of BoolTest::gt
        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
        Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
        Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
        { PreserveJVMState pjvms(this);
          set_control(iffalse);
          jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
        }
        set_control(iftrue);
      }

    } else {
      // mid is a range, not a singleton, so treat mid..hi as a unit
      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);

      // if there is a higher range, test for it and process it:
      if (mid == hi) {
        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
      } else {
        Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
        Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
        { PreserveJVMState pjvms(this);
          set_control(iftrue);
          jump_switch_ranges(key_val, mid, hi, switch_depth+1);
        }
        set_control(iffalse);
      }
    }

    // in any case, process the lower range
    jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
  }

  // Decrease pred_count for each successor after all is done.
  if (switch_depth == 0) {
    int unique_successors = switch_block->num_successors();
    for (int i = 0; i < unique_successors; i++) {
      Block* target = switch_block->successor_at(i);
      // Throw away the pre-allocated path for each unique successor.
      target->next_path_num();
    }
  }

#ifndef PRODUCT
  _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
  if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
    SwitchRange* r;
    int nsing = 0;
    for( r = lo; r <= hi; r++ ) {
      if( r->is_singleton() )  nsing++;
    }
    tty->print(">>> ");
    _method->print_short_name();
    tty->print_cr(" switch decision tree");
    tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
                  (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
    if (_max_switch_depth > _est_switch_depth) {
      tty->print_cr("******** BAD SWITCH DEPTH ********");
    }
    tty->print("   ");
    for( r = lo; r <= hi; r++ ) {
      r->print();
    }
    tty->cr();
  }
#endif
}

void Parse::modf() {
  Node *f2 = pop();
  Node *f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::frem),
                              "frem", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::modd() {
  Node *d2 = pop_pair();
  Node *d1 = pop_pair();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::drem),
                              "drem", NULL, //no memory effects
                              d1, top(), d2, top());
  Node* res_d   = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

#ifdef ASSERT
  Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1));
  assert(res_top == top(), "second value must be top");
#endif

  push_pair(res_d);
}

void Parse::l2f() {
  Node* f2 = pop();
  Node* f1 = pop();
  Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
                              CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
                              "l2f", NULL, //no memory effects
                              f1, f2);
  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));

  push(res);
}

void Parse::do_irem() {
  // Must keep both values on the expression-stack during null-check
  zero_check_int(peek());
  // Compile-time detect of null-exception?
  if (stopped())  return;

  Node* b = pop();
  Node* a = pop();

  const Type *t = _gvn.type(b);
  if (t != Type::TOP) {
    const TypeInt *ti = t->is_int();
    if (ti->is_con()) {
      int divisor = ti->get_con();
      // check for positive power of 2
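      // ((divisor & ~(divisor-1)) isolates the lowest set bit, so for a
      // positive divisor it equals the divisor exactly when a single bit is
      // set, i.e. when the divisor is a power of two.  The transformation
      // below then computes, per Java semantics where the remainder takes the
      // sign of the dividend:
      //    a % 2^k == (a >= 0) ? (a & (2^k - 1)) : -((-a) & (2^k - 1))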
      if (divisor > 0 &&
          (divisor & ~(divisor-1)) == divisor) {
        // yes !
        Node *mask = _gvn.intcon((divisor - 1));
        // Sigh, must handle negative dividends
        Node *zero = _gvn.intcon(0);
        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
        Node *iff = _gvn.transform( new IfFalseNode(ifff) );
        Node *ift = _gvn.transform( new IfTrueNode (ifff) );
        Node *reg = jump_if_join(ift, iff);
        Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
        // Negative path; negate/and/negate
        Node *neg = _gvn.transform( new SubINode(zero, a) );
        Node *andn= _gvn.transform( new AndINode(neg, mask) );
        Node *negn= _gvn.transform( new SubINode(zero, andn) );
        phi->init_req(1, negn);
        // Fast positive case
        Node *andx = _gvn.transform( new AndINode(a, mask) );
        phi->init_req(2, andx);
        // Push the merge
        push( _gvn.transform(phi) );
        return;
      }
    }
  }
  // Default case
  push( _gvn.transform( new ModINode(control(),a,b) ) );
}

// Handle jsr and jsr_w bytecode
void Parse::do_jsr() {
  assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");

  // Store information about current state, tagged with new _jsr_bci
  int return_bci = iter().next_bci();
  int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();

  // Update method data
  profile_taken_branch(jsr_bci);

  // The way we do things now, there is only one successor block
  // for the jsr, because the target code is cloned by ciTypeFlow.
  Block* target = successor_for_bci(jsr_bci);

  // What got pushed?
  const Type* ret_addr = target->peek();
  assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");

  // Effect of jsr on stack
  push(_gvn.makecon(ret_addr));

  // Flow to the jsr.
  merge(jsr_bci);
}

// Handle ret bytecode
void Parse::do_ret() {
  // Find to whom we return.
  assert(block()->num_successors() == 1, "a ret can only go one place now");
  Block* target = block()->successor_at(0);
  assert(!target->is_ready(), "our arrival must be expected");
  profile_ret(target->flow()->start());
  int pnum = target->next_path_num();
  merge_common(target, pnum);
}

static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
  if (btest != BoolTest::eq && btest != BoolTest::ne) {
    // Only ::eq and ::ne are supported for profile injection.
    return false;
  }
  if (test->is_Cmp() &&
      test->in(1)->Opcode() == Op_ProfileBoolean) {
    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
    int false_cnt = profile->false_count();
    int  true_cnt = profile->true_count();

    // Which count maps to taken depends on the actual test operation (::eq or ::ne).
    // No need to scale the counts because profile injection was designed
    // to feed exact counts into VM.
    taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
    not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;

    profile->consume();
    return true;
  }
  return false;
}
//--------------------------dynamic_branch_prediction--------------------------
// Try to gather dynamic branch prediction behavior.  Return a probability
// of the branch being taken and set the "cnt" field.  Return -1.0
// if we need to use static prediction for some reason.
float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
  ResourceMark rm;

  cnt  = COUNT_UNKNOWN;

  int     taken = 0;
  int not_taken = 0;

  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);

  if (use_mdo) {
    // Use MethodData information if it is available
    // FIXME: free the ProfileData structure
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  return PROB_UNKNOWN;
    ciProfileData* data = methodData->bci_to_data(bci());
    if (!data->is_JumpData())  return PROB_UNKNOWN;

    // get taken and not taken values
    taken = data->as_JumpData()->taken();
    not_taken = 0;
    if (data->is_BranchData()) {
      not_taken = data->as_BranchData()->not_taken();
    }

    // scale the counts to be commensurate with invocation counts:
    taken = method()->scale_count(taken);
    not_taken = method()->scale_count(not_taken);
  }

  // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
  // We also check that the individual counters are non-negative first, since an overflowed
  // (negative) counter could combine with a large positive one into a sum that still looks valid.
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    if (C->log() != NULL) {
      C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
    }
    return PROB_UNKNOWN;
  }

  // Compute frequency that we arrive here
  float sum = taken + not_taken;
  // Adjust, if this block is a cloned private block but the
  // Jump counts are shared.  Take the private counts for
  // just this path instead of the shared counts.
  if( block()->count() > 0 )
    sum = block()->count();
  cnt = sum / FreqCountInvocations;

  // Pin probability to sane limits
  float prob;
  if( !taken )
    prob = (0+PROB_MIN) / 2;
  else if( !not_taken )
    prob = (1+PROB_MAX) / 2;
  else {                         // Compute probability of true path
    prob = (float)taken / (float)(taken + not_taken);
    if (prob > PROB_MAX)  prob = PROB_MAX;
    if (prob < PROB_MIN)   prob = PROB_MIN;
  }

  assert((cnt > 0.0f) && (prob > 0.0f),
         "Bad frequency assignment in if");

  if (C->log() != NULL) {
    const char* prob_str = NULL;
    if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
    if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
    char prob_str_buf[30];
    if (prob_str == NULL) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
}

//-----------------------------branch_prediction-------------------------------
float Parse::branch_prediction(float& cnt,
                               BoolTest::mask btest,
                               int target_bci,
                               Node* test) {
  float prob = dynamic_branch_prediction(cnt, btest, test);
  // If prob is unknown, switch to static prediction
  if (prob != PROB_UNKNOWN)  return prob;

  prob = PROB_FAIR;                   // Set default value
  if (btest == BoolTest::eq)          // Exactly equal test?
    prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
  else if (btest == BoolTest::ne)
    prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent

  // If this is a conditional test guarding a backwards branch,
  // assume it's a loop-back edge.  Make it a likely taken branch.
  if (target_bci < bci()) {
    if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
      // Since it's an OSR, we probably have profile data, but since
      // branch_prediction returned PROB_UNKNOWN, the counts are too small.
      // Let's make a special check here for completely zero counts.
      ciMethodData* methodData = method()->method_data();
      if (!methodData->is_empty()) {
        ciProfileData* data = methodData->bci_to_data(bci());
        // Only stop for truly zero counts, which mean an unknown part
        // of the OSR-ed method, and we want to deopt to gather more stats.
        // If you have ANY counts, then this loop is simply 'cold' relative
        // to the OSR loop.
        if (data->as_BranchData()->taken() +
            data->as_BranchData()->not_taken() == 0 ) {
          // This is the only way to return PROB_UNKNOWN:
          return PROB_UNKNOWN;
        }
      }
    }
    prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
  }

  assert(prob != PROB_UNKNOWN, "must have some guess at this point");
  return prob;
}

// The magic constants are chosen so as to match the output of
// branch_prediction() when the profile reports a zero taken count.
// It is important to distinguish zero counts unambiguously, because
// some branches (e.g., _213_javac.Assembler.eliminate) validly produce
// very small but nonzero probabilities, which if confused with zero
// counts would keep the program recompiling indefinitely.
bool Parse::seems_never_taken(float prob) const {
  return prob < PROB_MIN;
}

// True if the comparison seems to be the kind that will not change its
// statistics from true to false.  See comments in adjust_map_after_if.
// This question is only asked along paths which are already
// classified as untaken (by seems_never_taken), so really,
// if a path is never taken, its controlling comparison is
// already acting in a stable fashion.  If the comparison
// seems stable, we will put an expensive uncommon trap
// on the untaken path.
bool Parse::seems_stable_comparison() const {
  if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
    return false;
  }
  return true;
}

//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
inline int Parse::repush_if_args() {
#ifndef PRODUCT
  if (PrintOpto && WizardMode) {
    tty->print("defending against excessive implicit null exceptions on %s @%d in ",
               Bytecodes::name(iter().cur_bc()), iter().cur_bci());
    method()->print_name(); tty->cr();
  }
#endif
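  // Bytecodes::depth() gives the net stack effect of the bytecode; an "if"
  // pops its operand(s), so the depth is negative and negating it yields the
  // number of arguments to push back.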
  int bc_depth = - Bytecodes::depth(iter().cur_bc());
  assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
  DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
  assert(argument(0) != NULL, "must exist");
  assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
  inc_sp(bc_depth);
  return bc_depth;
}

//----------------------------------do_ifnull----------------------------------
void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  if (prob == PROB_UNKNOWN) {
    // (An earlier version of do_ifnull omitted this trap for OSR methods.)
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  explicit_null_checks_inserted++;

  // Generate real control flow
  Node   *tst = _gvn.transform( new BoolNode( c, btest ) );

  // Sanity check the probability value
  assert(prob > 0.0f,"Bad probability in Parser");
  // Need xform to put node in hash table
  IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  // True branch
  { PreserveJVMState pjvms(this);
    Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control(iftrue);

    if (stopped()) {            // Path is dead?
      explicit_null_checks_elided++;
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {                    // Path is live.
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  // False branch
  Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control(iffalse);

  if (stopped()) {              // Path is dead?
    explicit_null_checks_elided++;
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else  {                     // Path is live.
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
                        next_block, branch_block);
  }
}

//------------------------------------do_if------------------------------------
void Parse::do_if(BoolTest::mask btest, Node* c) {
  int target_bci = iter().get_dest();

  Block* branch_block = successor_for_bci(target_bci);
  Block* next_block   = successor_for_bci(iter().next_bci());

  float cnt;
  float prob = branch_prediction(cnt, btest, target_bci, c);
  float untaken_prob = 1.0 - prob;

  if (prob == PROB_UNKNOWN) {
#ifndef PRODUCT
    if (PrintOpto && Verbose)
      tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
#endif
    repush_if_args(); // to gather stats on loop
    // We need to mark this branch as taken so that if we recompile we will
    // see that it is possible. In the tiered system the interpreter doesn't
    // do profiling and by the time we get to the lower tier from the interpreter
    // the path may be cold again. Make sure it doesn't look untaken
    profile_taken_branch(target_bci, !ProfileInterpreter);
    uncommon_trap(Deoptimization::Reason_unreached,
                  Deoptimization::Action_reinterpret,
                  NULL, "cold");
    if (C->eliminate_boxing()) {
      // Mark the successor blocks as parsed
      branch_block->next_path_num();
      next_block->next_path_num();
    }
    return;
  }

  // Sanity check the probability value
  assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");

  bool taken_if_true = true;
  // Convert BoolTest to canonical form:
  if (!BoolTest(btest).is_canonical()) {
    btest         = BoolTest(btest).negate();
    taken_if_true = false;
    // prob is NOT updated here; it remains the probability of the taken
    // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
  }
  assert(btest != BoolTest::eq, "!= is the only canonical exact test");

  Node* tst0 = new BoolNode(c, btest);
  Node* tst = _gvn.transform(tst0);
  BoolTest::mask taken_btest   = BoolTest::illegal;
  BoolTest::mask untaken_btest = BoolTest::illegal;

  if (tst->is_Bool()) {
    // Refresh c from the transformed bool node, since it may be
    // simpler than the original c.  Also re-canonicalize btest.
    // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
    // That can arise from statements like: if (x instanceof C) ...
    if (tst != tst0) {
      // Canonicalize one more time since transform can change it.
      btest = tst->as_Bool()->_test._test;
      if (!BoolTest(btest).is_canonical()) {
        // Reverse edges one more time...
        tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
        btest = tst->as_Bool()->_test._test;
        assert(BoolTest(btest).is_canonical(), "sanity");
        taken_if_true = !taken_if_true;
      }
      c = tst->in(1);
    }
    BoolTest::mask neg_btest = BoolTest(btest).negate();
    taken_btest   = taken_if_true ?     btest : neg_btest;
    untaken_btest = taken_if_true ? neg_btest :     btest;
  }

  // Generate real control flow
  float true_prob = (taken_if_true ? prob : untaken_prob);
  IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
  assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
  Node* taken_branch   = new IfTrueNode(iff);
  Node* untaken_branch = new IfFalseNode(iff);
  if (!taken_if_true) {  // Finish conversion to canonical form
    Node* tmp      = taken_branch;
    taken_branch   = untaken_branch;
    untaken_branch = tmp;
  }

  // Branch is taken:
  { PreserveJVMState pjvms(this);
    taken_branch = _gvn.transform(taken_branch);
    set_control(taken_branch);

    if (stopped()) {
      if (C->eliminate_boxing()) {
        // Mark the successor block as parsed
        branch_block->next_path_num();
      }
    } else {
      // Update method data
      profile_taken_branch(target_bci);
      adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
      if (!stopped()) {
        merge(target_bci);
      }
    }
  }

  untaken_branch = _gvn.transform(untaken_branch);
  set_control(untaken_branch);

  // Branch not taken.
  if (stopped()) {
    if (C->eliminate_boxing()) {
      // Mark the successor block as parsed
      next_block->next_path_num();
    }
  } else {
    // Update method data
    profile_not_taken_branch();
    adjust_map_after_if(untaken_btest, c, untaken_prob,
                        next_block, branch_block);
  }
}

bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
  // Don't want to speculate on uncommon traps when running with -Xcomp
  if (!UseInterpreter) {
    return false;
  }
  return (seems_never_taken(prob) && seems_stable_comparison());
}

//----------------------------adjust_map_after_if------------------------------
// Adjust the JVM state to reflect the result of taking this path.
// Basically, it means inspecting the CmpNode controlling this
// branch, seeing how it constrains a tested value, and then
// deciding if it's worth our while to encode this constraint
// as graph nodes in the current abstract interpretation map.
void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                                Block* path, Block* other_path) {
  if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
    return;                             // nothing to do

  bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));

  if (path_is_suitable_for_uncommon_trap(prob)) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  NULL,
                  (is_fallthrough ? "taken always" : "taken never"));
    return;
  }

  Node* val = c->in(1);
  Node* con = c->in(2);
  const Type* tcon = _gvn.type(con);
  const Type* tval = _gvn.type(val);
  bool have_con = tcon->singleton();
  if (tval->singleton()) {
    if (!have_con) {
      // Swap, so constant is in con.
      con  = val;
      tcon = tval;
      val  = c->in(2);
      tval = _gvn.type(val);
      btest = BoolTest(btest).commute();
      have_con = true;
    } else {
      // Do we have two constants?  Then leave well enough alone.
      have_con = false;
    }
  }
  if (!have_con)                        // remaining adjustments need a con
    return;

  sharpen_type_after_if(btest, con, tcon, val, tval);
}


static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
  Node* ldk;
  if (n->is_DecodeNKlass()) {
    if (n->in(1)->Opcode() != Op_LoadNKlass) {
      return NULL;
    } else {
      ldk = n->in(1);
    }
  } else if (n->Opcode() != Op_LoadKlass) {
    return NULL;
  } else {
    ldk = n;
  }
  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");

  Node* adr = ldk->in(MemNode::Address);
  intptr_t off = 0;
  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
    return NULL;
  const TypePtr* tp = gvn->type(obj)->is_ptr();
  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
    return NULL;

  return obj;
}

void Parse::sharpen_type_after_if(BoolTest::mask btest,
                                  Node* con, const Type* tcon,
                                  Node* val, const Type* tval) {
  // Look for opportunities to sharpen the type of a node
  // whose klass is compared with a constant klass.
  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
    Node* obj = extract_obj_from_klass_load(&_gvn, val);
    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
       // Found:
       //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
       // or the narrowOop equivalent.
       const Type* obj_type = _gvn.type(obj);
       const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
       if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
           tboth->higher_equal(obj_type)) {
          // obj has to be of the exact type Foo if the CmpP succeeds.
          int obj_in_map = map()->find_edge(obj);
          JVMState* jvms = this->jvms();
          if (obj_in_map >= 0 &&
              (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
            TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
            const Type* tcc = ccast->as_Type()->type();
            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
            // Delay transform() call to allow recovery of pre-cast value
            // at the control merge.
            _gvn.set_type_bottom(ccast);
            record_for_igvn(ccast);
            // Here's the payoff.
            replace_in_map(obj, ccast);
          }
       }
    }
  }

  int val_in_map = map()->find_edge(val);
  if (val_in_map < 0)  return;          // replace_in_map would be useless
  {
    JVMState* jvms = this->jvms();
    if (!(jvms->is_loc(val_in_map) ||
          jvms->is_stk(val_in_map)))
      return;                           // again, it would be useless
  }

  // Check for a comparison to a constant, and "know" that the compared
  // value is constrained on this path.
  assert(tcon->singleton(), "");
  ConstraintCastNode* ccast = NULL;
  Node* cast = NULL;
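  // (ccast, if set below, is a new ConstraintCast node that still needs to be
  // pinned to this control; cast may instead end up pointing at an existing
  // node, e.g. the constant itself, that simply replaces val in the map.)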
1294
1295  switch (btest) {
1296  case BoolTest::eq:                    // Constant test?
1297    {
1298      const Type* tboth = tcon->join_speculative(tval);
1299      if (tboth == tval)  break;        // Nothing to gain.
1300      if (tcon->isa_int()) {
1301        ccast = new CastIINode(val, tboth);
1302      } else if (tcon == TypePtr::NULL_PTR) {
1303        // Cast to null, but keep the pointer identity temporarily live.
1304        ccast = new CastPPNode(val, tboth);
1305      } else {
1306        const TypeF* tf = tcon->isa_float_constant();
1307        const TypeD* td = tcon->isa_double_constant();
1308        // Exclude tests vs float/double 0 as these could be
1309        // either +0 or -0.  Just because you are equal to +0
1310        // doesn't mean you ARE +0!
1311        // Note, following code also replaces Long and Oop values.
1312        if ((!tf || tf->_f != 0.0) &&
1313            (!td || td->_d != 0.0))
1314          cast = con;                   // Replace non-constant val by con.
1315      }
1316    }
1317    break;
1318
1319  case BoolTest::ne:
1320    if (tcon == TypePtr::NULL_PTR) {
1321      cast = cast_not_null(val, false);
1322    }
1323    break;
1324
1325  default:
1326    // (At this point we could record int range types with CastII.)
1327    break;
1328  }
1329
1330  if (ccast != NULL) {
1331    const Type* tcc = ccast->as_Type()->type();
1332    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1333    // Delay transform() call to allow recovery of pre-cast value
1334    // at the control merge.
1335    ccast->set_req(0, control());
1336    _gvn.set_type_bottom(ccast);
1337    record_for_igvn(ccast);
1338    cast = ccast;
1339  }
1340
1341  if (cast != NULL) {                   // Here's the payoff.
1342    replace_in_map(val, cast);
1343  }
1344}
1345
1346/**
1347 * Use speculative type to optimize CmpP node: if comparison is
1348 * against the low level class, cast the object to the speculative
1349 * type if any. CmpP should then go away.
1350 *
1351 * @param c  expected CmpP node
1352 * @return   result of CmpP on object casted to speculative type
1353 *
1354 */
1355Node* Parse::optimize_cmp_with_klass(Node* c) {
1356  // If this is transformed by the _gvn to a comparison with the low
1357  // level klass then we may be able to use speculation
1358  if (c->Opcode() == Op_CmpP &&
1359      (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1360      c->in(2)->is_Con()) {
1361    Node* load_klass = NULL;
1362    Node* decode = NULL;
1363    if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1364      decode = c->in(1);
1365      load_klass = c->in(1)->in(1);
1366    } else {
1367      load_klass = c->in(1);
1368    }
1369    if (load_klass->in(2)->is_AddP()) {
1370      Node* addp = load_klass->in(2);
1371      Node* obj = addp->in(AddPNode::Address);
1372      const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1373      if (obj_type->speculative_type_not_null() != NULL) {
1374        ciKlass* k = obj_type->speculative_type();
1375        inc_sp(2);
1376        obj = maybe_cast_profiled_obj(obj, k);
1377        dec_sp(2);
1378        // Make the CmpP use the casted obj
1379        addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1380        load_klass = load_klass->clone();
1381        load_klass->set_req(2, addp);
1382        load_klass = _gvn.transform(load_klass);
1383        if (decode != NULL) {
1384          decode = decode->clone();
1385          decode->set_req(1, load_klass);
1386          load_klass = _gvn.transform(decode);
1387        }
1388        c = c->clone();
1389        c->set_req(1, load_klass);
1390        c = _gvn.transform(c);
1391      }
1392    }
1393  }
1394  return c;
1395}
1396
1397//------------------------------do_one_bytecode--------------------------------
1398// Parse this bytecode, and alter the Parser's JVM->Node mapping
1399void Parse::do_one_bytecode() {
1400  Node *a, *b, *c, *d;          // Handy temps
1401  BoolTest::mask btest;
1402  int i;
1403
1404  assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1405
1406  if (C->check_node_count(NodeLimitFudgeFactor * 5,
1407                          "out of nodes parsing method")) {
1408    return;
1409  }
1410
1411#ifdef ASSERT
1412  // for setting breakpoints
1413  if (TraceOptoParse) {
1414    tty->print(" @");
1415    dump_bci(bci());
1416    tty->cr();
1417  }
1418#endif
1419
1420  switch (bc()) {
1421  case Bytecodes::_nop:
1422    // do nothing
1423    break;
1424  case Bytecodes::_lconst_0:
1425    push_pair(longcon(0));
1426    break;
1427
1428  case Bytecodes::_lconst_1:
1429    push_pair(longcon(1));
1430    break;
1431
1432  case Bytecodes::_fconst_0:
1433    push(zerocon(T_FLOAT));
1434    break;
1435
1436  case Bytecodes::_fconst_1:
1437    push(makecon(TypeF::ONE));
1438    break;
1439
1440  case Bytecodes::_fconst_2:
1441    push(makecon(TypeF::make(2.0f)));
1442    break;
1443
1444  case Bytecodes::_dconst_0:
1445    push_pair(zerocon(T_DOUBLE));
1446    break;
1447
1448  case Bytecodes::_dconst_1:
1449    push_pair(makecon(TypeD::ONE));
1450    break;
1451
1452  case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1453  case Bytecodes::_iconst_0: push(intcon( 0)); break;
1454  case Bytecodes::_iconst_1: push(intcon( 1)); break;
1455  case Bytecodes::_iconst_2: push(intcon( 2)); break;
1456  case Bytecodes::_iconst_3: push(intcon( 3)); break;
1457  case Bytecodes::_iconst_4: push(intcon( 4)); break;
1458  case Bytecodes::_iconst_5: push(intcon( 5)); break;
1459  case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
1460  case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
1461  case Bytecodes::_aconst_null: push(null());  break;
1462  case Bytecodes::_ldc:
1463  case Bytecodes::_ldc_w:
1464  case Bytecodes::_ldc2_w:
1465    // If the constant is unresolved, run this BC once in the interpreter.
1466    {
1467      ciConstant constant = iter().get_constant();
1468      if (constant.basic_type() == T_OBJECT &&
1469          !constant.as_object()->is_loaded()) {
1470        int index = iter().get_constant_pool_index();
1471        constantTag tag = iter().get_constant_pool_tag(index);
1472        uncommon_trap(Deoptimization::make_trap_request
1473                      (Deoptimization::Reason_unloaded,
1474                       Deoptimization::Action_reinterpret,
1475                       index),
1476                      NULL, tag.internal_name());
1477        break;
1478      }
1479      assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
1480             "must be java_mirror of klass");
1481      bool pushed = push_constant(constant, true);
1482      guarantee(pushed, "must be possible to push this constant");
1483    }
1484
1485    break;
1486
1487  case Bytecodes::_aload_0:
1488    push( local(0) );
1489    break;
1490  case Bytecodes::_aload_1:
1491    push( local(1) );
1492    break;
1493  case Bytecodes::_aload_2:
1494    push( local(2) );
1495    break;
1496  case Bytecodes::_aload_3:
1497    push( local(3) );
1498    break;
1499  case Bytecodes::_aload:
1500    push( local(iter().get_index()) );
1501    break;
1502
1503  case Bytecodes::_fload_0:
1504  case Bytecodes::_iload_0:
1505    push( local(0) );
1506    break;
1507  case Bytecodes::_fload_1:
1508  case Bytecodes::_iload_1:
1509    push( local(1) );
1510    break;
1511  case Bytecodes::_fload_2:
1512  case Bytecodes::_iload_2:
1513    push( local(2) );
1514    break;
1515  case Bytecodes::_fload_3:
1516  case Bytecodes::_iload_3:
1517    push( local(3) );
1518    break;
1519  case Bytecodes::_fload:
1520  case Bytecodes::_iload:
1521    push( local(iter().get_index()) );
1522    break;
1523  case Bytecodes::_lload_0:
1524    push_pair_local( 0 );
1525    break;
1526  case Bytecodes::_lload_1:
1527    push_pair_local( 1 );
1528    break;
1529  case Bytecodes::_lload_2:
1530    push_pair_local( 2 );
1531    break;
1532  case Bytecodes::_lload_3:
1533    push_pair_local( 3 );
1534    break;
1535  case Bytecodes::_lload:
1536    push_pair_local( iter().get_index() );
1537    break;
1538
1539  case Bytecodes::_dload_0:
1540    push_pair_local(0);
1541    break;
1542  case Bytecodes::_dload_1:
1543    push_pair_local(1);
1544    break;
1545  case Bytecodes::_dload_2:
1546    push_pair_local(2);
1547    break;
1548  case Bytecodes::_dload_3:
1549    push_pair_local(3);
1550    break;
1551  case Bytecodes::_dload:
1552    push_pair_local(iter().get_index());
1553    break;
1554  case Bytecodes::_fstore_0:
1555  case Bytecodes::_istore_0:
1556  case Bytecodes::_astore_0:
1557    set_local( 0, pop() );
1558    break;
1559  case Bytecodes::_fstore_1:
1560  case Bytecodes::_istore_1:
1561  case Bytecodes::_astore_1:
1562    set_local( 1, pop() );
1563    break;
1564  case Bytecodes::_fstore_2:
1565  case Bytecodes::_istore_2:
1566  case Bytecodes::_astore_2:
1567    set_local( 2, pop() );
1568    break;
1569  case Bytecodes::_fstore_3:
1570  case Bytecodes::_istore_3:
1571  case Bytecodes::_astore_3:
1572    set_local( 3, pop() );
1573    break;
1574  case Bytecodes::_fstore:
1575  case Bytecodes::_istore:
1576  case Bytecodes::_astore:
1577    set_local( iter().get_index(), pop() );
1578    break;
1579  // long stores
1580  case Bytecodes::_lstore_0:
1581    set_pair_local( 0, pop_pair() );
1582    break;
1583  case Bytecodes::_lstore_1:
1584    set_pair_local( 1, pop_pair() );
1585    break;
1586  case Bytecodes::_lstore_2:
1587    set_pair_local( 2, pop_pair() );
1588    break;
1589  case Bytecodes::_lstore_3:
1590    set_pair_local( 3, pop_pair() );
1591    break;
1592  case Bytecodes::_lstore:
1593    set_pair_local( iter().get_index(), pop_pair() );
1594    break;
1595
1596  // double stores
1597  case Bytecodes::_dstore_0:
1598    set_pair_local( 0, dstore_rounding(pop_pair()) );
1599    break;
1600  case Bytecodes::_dstore_1:
1601    set_pair_local( 1, dstore_rounding(pop_pair()) );
1602    break;
1603  case Bytecodes::_dstore_2:
1604    set_pair_local( 2, dstore_rounding(pop_pair()) );
1605    break;
1606  case Bytecodes::_dstore_3:
1607    set_pair_local( 3, dstore_rounding(pop_pair()) );
1608    break;
1609  case Bytecodes::_dstore:
1610    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1611    break;
1612
1613  case Bytecodes::_pop:  dec_sp(1);   break;
1614  case Bytecodes::_pop2: dec_sp(2);   break;
1615  case Bytecodes::_swap:
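    // before: .. b, a
    // after:  .. a, b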
1616    a = pop();
1617    b = pop();
1618    push(a);
1619    push(b);
1620    break;
1621  case Bytecodes::_dup:
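    // before: .. a
    // after:  .. a, a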
1622    a = pop();
1623    push(a);
1624    push(a);
1625    break;
1626  case Bytecodes::_dup_x1:
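    // before: .. b, a
    // after:  .. a, b, a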
1627    a = pop();
1628    b = pop();
1629    push( a );
1630    push( b );
1631    push( a );
1632    break;
1633  case Bytecodes::_dup_x2:
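    // before: .. c, b, a
    // after:  .. a, c, b, a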
1634    a = pop();
1635    b = pop();
1636    c = pop();
1637    push( a );
1638    push( c );
1639    push( b );
1640    push( a );
1641    break;
1642  case Bytecodes::_dup2:
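    // before: .. b, a
    // after:  .. b, a, b, a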
1643    a = pop();
1644    b = pop();
1645    push( b );
1646    push( a );
1647    push( b );
1648    push( a );
1649    break;
1650
1651  case Bytecodes::_dup2_x1:
1652    // before: .. c, b, a
1653    // after:  .. b, a, c, b, a
1654    // not tested
1655    a = pop();
1656    b = pop();
1657    c = pop();
1658    push( b );
1659    push( a );
1660    push( c );
1661    push( b );
1662    push( a );
1663    break;
1664  case Bytecodes::_dup2_x2:
1665    // before: .. d, c, b, a
1666    // after:  .. b, a, d, c, b, a
1667    // not tested
1668    a = pop();
1669    b = pop();
1670    c = pop();
1671    d = pop();
1672    push( b );
1673    push( a );
1674    push( d );
1675    push( c );
1676    push( b );
1677    push( a );
1678    break;
1679
1680  case Bytecodes::_arraylength: {
1681    // Must do null-check with value on expression stack
1682    Node *ary = null_check(peek(), T_ARRAY);
1683    // Null exception detected at compile time?
1684    if (stopped())  return;
1685    a = pop();
1686    push(load_array_length(a));
1687    break;
1688  }
1689
1690  case Bytecodes::_baload: array_load(T_BYTE);   break;
1691  case Bytecodes::_caload: array_load(T_CHAR);   break;
1692  case Bytecodes::_iaload: array_load(T_INT);    break;
1693  case Bytecodes::_saload: array_load(T_SHORT);  break;
1694  case Bytecodes::_faload: array_load(T_FLOAT);  break;
1695  case Bytecodes::_aaload: array_load(T_OBJECT); break;
1696  case Bytecodes::_laload: {
1697    a = array_addressing(T_LONG, 0);
1698    if (stopped())  return;     // guaranteed null or range check
1699    dec_sp(2);                  // Pop array and index
1700    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
1701    break;
1702  }
1703  case Bytecodes::_daload: {
1704    a = array_addressing(T_DOUBLE, 0);
1705    if (stopped())  return;     // guaranteed null or range check
1706    dec_sp(2);                  // Pop array and index
1707    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
1708    break;
1709  }
1710  case Bytecodes::_bastore: array_store(T_BYTE);  break;
1711  case Bytecodes::_castore: array_store(T_CHAR);  break;
1712  case Bytecodes::_iastore: array_store(T_INT);   break;
1713  case Bytecodes::_sastore: array_store(T_SHORT); break;
1714  case Bytecodes::_fastore: array_store(T_FLOAT); break;
1715  case Bytecodes::_aastore: {
1716    d = array_addressing(T_OBJECT, 1);
1717    if (stopped())  return;     // guaranteed null or range check
1718    array_store_check();
1719    c = pop();                  // Oop to store
1720    b = pop();                  // index (already used)
1721    a = pop();                  // the array itself
1722    const TypeOopPtr* elemtype  = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
1723    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1724    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT,
1725                                     StoreNode::release_if_reference(T_OBJECT));
1726    break;
1727  }
1728  case Bytecodes::_lastore: {
1729    a = array_addressing(T_LONG, 2);
1730    if (stopped())  return;     // guaranteed null or range check
1731    c = pop_pair();
1732    dec_sp(2);                  // Pop array and index
1733    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
1734    break;
1735  }
1736  case Bytecodes::_dastore: {
1737    a = array_addressing(T_DOUBLE, 2);
1738    if (stopped())  return;     // guaranteed null or range check
1739    c = pop_pair();
1740    dec_sp(2);                  // Pop array and index
1741    c = dstore_rounding(c);
1742    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
1743    break;
1744  }
1745  case Bytecodes::_getfield:
1746    do_getfield();
1747    break;
1748
1749  case Bytecodes::_getstatic:
1750    do_getstatic();
1751    break;
1752
1753  case Bytecodes::_putfield:
1754    do_putfield();
1755    break;
1756
1757  case Bytecodes::_putstatic:
1758    do_putstatic();
1759    break;
1760
1761  case Bytecodes::_irem:
1762    do_irem();
1763    break;
1764  case Bytecodes::_idiv:
1765    // Must keep both values on the expression stack during the zero check
1766    zero_check_int(peek());
1767    // Divide-by-zero detected at compile time?
1768    if (stopped())  return;
1769    b = pop();
1770    a = pop();
1771    push( _gvn.transform( new DivINode(control(),a,b) ) );
1772    break;
1773  case Bytecodes::_imul:
1774    b = pop(); a = pop();
1775    push( _gvn.transform( new MulINode(a,b) ) );
1776    break;
1777  case Bytecodes::_iadd:
1778    b = pop(); a = pop();
1779    push( _gvn.transform( new AddINode(a,b) ) );
1780    break;
1781  case Bytecodes::_ineg:
1782    a = pop();
1783    push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
1784    break;
1785  case Bytecodes::_isub:
1786    b = pop(); a = pop();
1787    push( _gvn.transform( new SubINode(a,b) ) );
1788    break;
1789  case Bytecodes::_iand:
1790    b = pop(); a = pop();
1791    push( _gvn.transform( new AndINode(a,b) ) );
1792    break;
1793  case Bytecodes::_ior:
1794    b = pop(); a = pop();
1795    push( _gvn.transform( new OrINode(a,b) ) );
1796    break;
1797  case Bytecodes::_ixor:
1798    b = pop(); a = pop();
1799    push( _gvn.transform( new XorINode(a,b) ) );
1800    break;
1801  case Bytecodes::_ishl:
1802    b = pop(); a = pop();
1803    push( _gvn.transform( new LShiftINode(a,b) ) );
1804    break;
1805  case Bytecodes::_ishr:
1806    b = pop(); a = pop();
1807    push( _gvn.transform( new RShiftINode(a,b) ) );
1808    break;
1809  case Bytecodes::_iushr:
1810    b = pop(); a = pop();
1811    push( _gvn.transform( new URShiftINode(a,b) ) );
1812    break;
1813
1814  case Bytecodes::_fneg:
1815    a = pop();
1816    b = _gvn.transform(new NegFNode (a));
1817    push(b);
1818    break;
1819
1820  case Bytecodes::_fsub:
1821    b = pop();
1822    a = pop();
1823    c = _gvn.transform( new SubFNode(a,b) );
1824    d = precision_rounding(c);
1825    push( d );
1826    break;
1827
1828  case Bytecodes::_fadd:
1829    b = pop();
1830    a = pop();
1831    c = _gvn.transform( new AddFNode(a,b) );
1832    d = precision_rounding(c);
1833    push( d );
1834    break;
1835
1836  case Bytecodes::_fmul:
1837    b = pop();
1838    a = pop();
1839    c = _gvn.transform( new MulFNode(a,b) );
1840    d = precision_rounding(c);
1841    push( d );
1842    break;
1843
1844  case Bytecodes::_fdiv:
1845    b = pop();
1846    a = pop();
1847    c = _gvn.transform( new DivFNode(0,a,b) );
1848    d = precision_rounding(c);
1849    push( d );
1850    break;
1851
1852  case Bytecodes::_frem:
1853    if (Matcher::has_match_rule(Op_ModF)) {
1854      // Generate a ModF node.
1855      b = pop();
1856      a = pop();
1857      c = _gvn.transform( new ModFNode(0,a,b) );
1858      d = precision_rounding(c);
1859      push( d );
1860    }
1861    else {
1862      // Generate a call.
1863      modf();
1864    }
1865    break;
1866
1867  case Bytecodes::_fcmpl:
1868    b = pop();
1869    a = pop();
1870    c = _gvn.transform( new CmpF3Node( a, b));
1871    push(c);
1872    break;
1873  case Bytecodes::_fcmpg:
1874    b = pop();
1875    a = pop();
1876
1877    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
1878    // which negates the result sign except for unordered.  Flip the unordered
1879    // as well by using CmpF3 which implements unordered-lesser instead of
1880    // unordered-greater semantics.  Finally, negate the result bits.  Result
1881    // is the same as using a CmpF3Greater except we did it with CmpF3 alone.
1882    c = _gvn.transform( new CmpF3Node( b, a));
1883    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
1884    push(c);
1885    break;
1886
1887  case Bytecodes::_f2i:
1888    a = pop();
1889    push(_gvn.transform(new ConvF2INode(a)));
1890    break;
1891
1892  case Bytecodes::_d2i:
1893    a = pop_pair();
1894    b = _gvn.transform(new ConvD2INode(a));
1895    push( b );
1896    break;
1897
1898  case Bytecodes::_f2d:
1899    a = pop();
1900    b = _gvn.transform( new ConvF2DNode(a));
1901    push_pair( b );
1902    break;
1903
1904  case Bytecodes::_d2f:
1905    a = pop_pair();
1906    b = _gvn.transform( new ConvD2FNode(a));
1907    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1908    //b = _gvn.transform(new RoundFloatNode(0, b) );
1909    push( b );
1910    break;
1911
1912  case Bytecodes::_l2f:
1913    if (Matcher::convL2FSupported()) {
1914      a = pop_pair();
1915      b = _gvn.transform( new ConvL2FNode(a));
1916      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1917      // Rather than storing the result into an FP register then pushing
1918      // out to memory to round, the machine instruction that implements
1919      // ConvL2F is responsible for rounding.
1920      // c = precision_rounding(b);
1921      c = _gvn.transform(b);
1922      push(c);
1923    } else {
1924      l2f();
1925    }
1926    break;
1927
1928  case Bytecodes::_l2d:
1929    a = pop_pair();
1930    b = _gvn.transform( new ConvL2DNode(a));
1931    // For i486.ad, rounding is always necessary (see _l2f above).
1932    // c = dprecision_rounding(b);
1933    c = _gvn.transform(b);
1934    push_pair(c);
1935    break;
1936
1937  case Bytecodes::_f2l:
1938    a = pop();
1939    b = _gvn.transform( new ConvF2LNode(a));
1940    push_pair(b);
1941    break;
1942
1943  case Bytecodes::_d2l:
1944    a = pop_pair();
1945    b = _gvn.transform( new ConvD2LNode(a));
1946    push_pair(b);
1947    break;
1948
1949  case Bytecodes::_dsub:
1950    b = pop_pair();
1951    a = pop_pair();
1952    c = _gvn.transform( new SubDNode(a,b) );
1953    d = dprecision_rounding(c);
1954    push_pair( d );
1955    break;
1956
1957  case Bytecodes::_dadd:
1958    b = pop_pair();
1959    a = pop_pair();
1960    c = _gvn.transform( new AddDNode(a,b) );
1961    d = dprecision_rounding(c);
1962    push_pair( d );
1963    break;
1964
1965  case Bytecodes::_dmul:
1966    b = pop_pair();
1967    a = pop_pair();
1968    c = _gvn.transform( new MulDNode(a,b) );
1969    d = dprecision_rounding(c);
1970    push_pair( d );
1971    break;
1972
1973  case Bytecodes::_ddiv:
1974    b = pop_pair();
1975    a = pop_pair();
1976    c = _gvn.transform( new DivDNode(0,a,b) );
1977    d = dprecision_rounding(c);
1978    push_pair( d );
1979    break;
1980
1981  case Bytecodes::_dneg:
1982    a = pop_pair();
1983    b = _gvn.transform(new NegDNode (a));
1984    push_pair(b);
1985    break;
1986
1987  case Bytecodes::_drem:
1988    if (Matcher::has_match_rule(Op_ModD)) {
1989      // Generate a ModD node.
1990      b = pop_pair();
1991      a = pop_pair();
1992      // a % b
1993
1994      c = _gvn.transform( new ModDNode(0,a,b) );
1995      d = dprecision_rounding(c);
1996      push_pair( d );
1997    }
1998    else {
1999      // Generate a call.
2000      modd();
2001    }
2002    break;
2003
2004  case Bytecodes::_dcmpl:
2005    b = pop_pair();
2006    a = pop_pair();
2007    c = _gvn.transform( new CmpD3Node( a, b));
2008    push(c);
2009    break;
2010
2011  case Bytecodes::_dcmpg:
2012    b = pop_pair();
2013    a = pop_pair();
2014    // Same as dcmpl but need to flip the unordered case.
2015    // Commute the inputs, which negates the result sign except for unordered.
2016    // Flip the unordered as well by using CmpD3 which implements
2017    // unordered-lesser instead of unordered-greater semantics.
2018    // Finally, negate the result bits.  Result is same as using a
2019    // CmpD3Greater except we did it with CmpD3 alone.
2020    c = _gvn.transform( new CmpD3Node( b, a));
2021    c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
2022    push(c);
2023    break;
2024
2025
2026    // Note: for longs, the low word is on TOS and the high word is at TOS - 1
2027  case Bytecodes::_land:
2028    b = pop_pair();
2029    a = pop_pair();
2030    c = _gvn.transform( new AndLNode(a,b) );
2031    push_pair(c);
2032    break;
2033  case Bytecodes::_lor:
2034    b = pop_pair();
2035    a = pop_pair();
2036    c = _gvn.transform( new OrLNode(a,b) );
2037    push_pair(c);
2038    break;
2039  case Bytecodes::_lxor:
2040    b = pop_pair();
2041    a = pop_pair();
2042    c = _gvn.transform( new XorLNode(a,b) );
2043    push_pair(c);
2044    break;
2045
2046  case Bytecodes::_lshl:
2047    b = pop();                  // the shift count
2048    a = pop_pair();             // value to be shifted
2049    c = _gvn.transform( new LShiftLNode(a,b) );
2050    push_pair(c);
2051    break;
2052  case Bytecodes::_lshr:
2053    b = pop();                  // the shift count
2054    a = pop_pair();             // value to be shifted
2055    c = _gvn.transform( new RShiftLNode(a,b) );
2056    push_pair(c);
2057    break;
2058  case Bytecodes::_lushr:
2059    b = pop();                  // the shift count
2060    a = pop_pair();             // value to be shifted
2061    c = _gvn.transform( new URShiftLNode(a,b) );
2062    push_pair(c);
2063    break;
2064  case Bytecodes::_lmul:
2065    b = pop_pair();
2066    a = pop_pair();
2067    c = _gvn.transform( new MulLNode(a,b) );
2068    push_pair(c);
2069    break;
2070
2071  case Bytecodes::_lrem:
2072    // Must keep both values on the expression stack during the zero check
2073    assert(peek(0) == top(), "long word order");
2074    zero_check_long(peek(1));
2075    // Divide-by-zero detected at compile time?
2076    if (stopped())  return;
2077    b = pop_pair();
2078    a = pop_pair();
2079    c = _gvn.transform( new ModLNode(control(),a,b) );
2080    push_pair(c);
2081    break;
2082
2083  case Bytecodes::_ldiv:
2084    // Must keep both values on the expression stack during the zero check
2085    assert(peek(0) == top(), "long word order");
2086    zero_check_long(peek(1));
2087    // Divide-by-zero detected at compile time?
2088    if (stopped())  return;
2089    b = pop_pair();
2090    a = pop_pair();
2091    c = _gvn.transform( new DivLNode(control(),a,b) );
2092    push_pair(c);
2093    break;
2094
2095  case Bytecodes::_ladd:
2096    b = pop_pair();
2097    a = pop_pair();
2098    c = _gvn.transform( new AddLNode(a,b) );
2099    push_pair(c);
2100    break;
2101  case Bytecodes::_lsub:
2102    b = pop_pair();
2103    a = pop_pair();
2104    c = _gvn.transform( new SubLNode(a,b) );
2105    push_pair(c);
2106    break;
2107  case Bytecodes::_lcmp:
2108    // Safepoints are now inserted _before_ branches.  The long-compare
2109    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
2110    // slew of control flow.  These are usually followed by a CmpI vs zero and
2111    // a branch; this pattern then optimizes to the obvious long-compare and
2112    // branch.  However, if the branch is backwards there's a Safepoint
2113    // inserted.  The inserted Safepoint captures the JVM state at the
2114    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
2115    // long-compare is used to control a loop the debug info will force
2116    // computation of the 3-way value, even though the generated code uses a
2117    // long-compare and branch.  We try to rectify the situation by inserting
2118    // a SafePoint here and have it dominate and kill the safepoint added at a
2119    // following backwards branch.  At this point the JVM state merely holds 2
2120    // longs but not the 3-way value.
2121    if( UseLoopSafepoints ) {
2122      switch( iter().next_bc() ) {
2123      case Bytecodes::_ifgt:
2124      case Bytecodes::_iflt:
2125      case Bytecodes::_ifge:
2126      case Bytecodes::_ifle:
2127      case Bytecodes::_ifne:
2128      case Bytecodes::_ifeq:
2129        // If this is a backwards branch in the bytecodes, add Safepoint
2130        maybe_add_safepoint(iter().next_get_dest());
2131      }
2132    }
2133    b = pop_pair();
2134    a = pop_pair();
2135    c = _gvn.transform( new CmpL3Node( a, b ));
2136    push(c);
2137    break;
2138
2139  case Bytecodes::_lneg:
2140    a = pop_pair();
2141    b = _gvn.transform( new SubLNode(longcon(0),a));
2142    push_pair(b);
2143    break;
2144  case Bytecodes::_l2i:
2145    a = pop_pair();
2146    push( _gvn.transform( new ConvL2INode(a)));
2147    break;
2148  case Bytecodes::_i2l:
2149    a = pop();
2150    b = _gvn.transform( new ConvI2LNode(a));
2151    push_pair(b);
2152    break;
2153  case Bytecodes::_i2b:
2154    // Sign extend
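    // e.g. 0x000000FF << 24 == 0xFF000000; arithmetic >> 24 gives 0xFFFFFFFF (-1)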
2155    a = pop();
2156    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) );
2157    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) );
2158    push( a );
2159    break;
2160  case Bytecodes::_i2s:
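    // Sign extend from 16 bits: shift left 16, then arithmetic shift right 16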
2161    a = pop();
2162    a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) );
2163    a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) );
2164    push( a );
2165    break;
2166  case Bytecodes::_i2c:
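    // Zero extend: char is an unsigned 16-bit value, so mask with 0xFFFF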
2167    a = pop();
2168    push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) );
2169    break;
2170
2171  case Bytecodes::_i2f:
2172    a = pop();
2173    b = _gvn.transform( new ConvI2FNode(a) );
2174    c = precision_rounding(b);
2175    push(c);
2176    break;
2177
2178  case Bytecodes::_i2d:
2179    a = pop();
2180    b = _gvn.transform( new ConvI2DNode(a));
2181    push_pair(b);
2182    break;
2183
2184  case Bytecodes::_iinc:        // Increment local
2185    i = iter().get_index();     // Get local index
2186    set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2187    break;
2188
2189  // Exit points of synchronized methods must have an unlock node
2190  case Bytecodes::_return:
2191    return_current(NULL);
2192    break;
2193
2194  case Bytecodes::_ireturn:
2195  case Bytecodes::_areturn:
2196  case Bytecodes::_freturn:
2197    return_current(pop());
2198    break;
2199  case Bytecodes::_lreturn:
2200    return_current(pop_pair());
2201    break;
2202  case Bytecodes::_dreturn:
2203    return_current(pop_pair());
2204    break;
2205
2206  case Bytecodes::_athrow:
2207    // A null exception oop throws a NullPointerException
2208    null_check(peek());
2209    if (stopped())  return;
2210    // Hook the thrown exception directly to subsequent handlers.
2211    if (BailoutToInterpreterForThrows) {
2212      // Keep method interpreted from now on.
2213      uncommon_trap(Deoptimization::Reason_unhandled,
2214                    Deoptimization::Action_make_not_compilable);
2215      return;
2216    }
2217    if (env()->jvmti_can_post_on_exceptions()) {
2218      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
2219      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
2220    }
2221    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
2222    add_exception_state(make_exception_state(peek()));
2223    break;
2224
2225  case Bytecodes::_goto:   // fall through
2226  case Bytecodes::_goto_w: {
2227    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2228
2229    // If this is a backwards branch in the bytecodes, add Safepoint
2230    maybe_add_safepoint(target_bci);
2231
2232    // Update method data
2233    profile_taken_branch(target_bci);
2234
2235    // Merge the current control into the target basic block
2236    merge(target_bci);
2237
2238    // See if we can get some profile data and hand it off to the next block
2239    Block *target_block = block()->successor_for_bci(target_bci);
2240    if (target_block->pred_count() != 1)  break;
2241    ciMethodData* methodData = method()->method_data();
2242    if (!methodData->is_mature())  break;
2243    ciProfileData* data = methodData->bci_to_data(bci());
2244    assert( data->is_JumpData(), "need JumpData for taken branch" );
2245    int taken = ((ciJumpData*)data)->taken();
2246    taken = method()->scale_count(taken);
2247    target_block->set_count(taken);
2248    break;
2249  }
2250
2251  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
2252  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2253  handle_if_null:
2254    // If this is a backwards branch in the bytecodes, add Safepoint
2255    maybe_add_safepoint(iter().get_dest());
2256    a = null();
2257    b = pop();
2258    if (!_gvn.type(b)->speculative_maybe_null() &&
2259        !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
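      // The speculative type says this value is never null and we have not
      // trapped too often on that speculation, so emit a speculative null
      // check.  inc_sp/dec_sp keep the popped value on the expression stack
      // so the interpreter state is correct if the check deoptimizes.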
2260      inc_sp(1);
2261      Node* null_ctl = top();
2262      b = null_check_oop(b, &null_ctl, true, true, true);
2263      assert(null_ctl->is_top(), "no null control here");
2264      dec_sp(1);
2265    }
2266    c = _gvn.transform( new CmpPNode(b, a) );
2267    do_ifnull(btest, c);
2268    break;
2269
2270  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2271  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2272  handle_if_acmp:
2273    // If this is a backwards branch in the bytecodes, add Safepoint
2274    maybe_add_safepoint(iter().get_dest());
2275    a = pop();
2276    b = pop();
2277    c = _gvn.transform( new CmpPNode(b, a) );
2278    c = optimize_cmp_with_klass(c);
2279    do_if(btest, c);
2280    break;
2281
2282  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2283  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2284  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2285  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2286  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2287  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2288  handle_ifxx:
2289    // If this is a backwards branch in the bytecodes, add Safepoint
2290    maybe_add_safepoint(iter().get_dest());
2291    a = _gvn.intcon(0);
2292    b = pop();
2293    c = _gvn.transform( new CmpINode(b, a) );
2294    do_if(btest, c);
2295    break;
2296
2297  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2298  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2299  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2300  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2301  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2302  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2303  handle_if_icmp:
2304    // If this is a backwards branch in the bytecodes, add Safepoint
2305    maybe_add_safepoint(iter().get_dest());
2306    a = pop();
2307    b = pop();
2308    c = _gvn.transform( new CmpINode( b, a ) );
2309    do_if(btest, c);
2310    break;
2311
2312  case Bytecodes::_tableswitch:
2313    do_tableswitch();
2314    break;
2315
2316  case Bytecodes::_lookupswitch:
2317    do_lookupswitch();
2318    break;
2319
2320  case Bytecodes::_invokestatic:
2321  case Bytecodes::_invokedynamic:
2322  case Bytecodes::_invokespecial:
2323  case Bytecodes::_invokevirtual:
2324  case Bytecodes::_invokeinterface:
2325    do_call();
2326    break;
2327  case Bytecodes::_checkcast:
2328    do_checkcast();
2329    break;
2330  case Bytecodes::_instanceof:
2331    do_instanceof();
2332    break;
2333  case Bytecodes::_anewarray:
2334    do_anewarray();
2335    break;
2336  case Bytecodes::_newarray:
2337    do_newarray((BasicType)iter().get_index());
2338    break;
2339  case Bytecodes::_multianewarray:
2340    do_multianewarray();
2341    break;
2342  case Bytecodes::_new:
2343    do_new();
2344    break;
2345
2346  case Bytecodes::_jsr:
2347  case Bytecodes::_jsr_w:
2348    do_jsr();
2349    break;
2350
2351  case Bytecodes::_ret:
2352    do_ret();
2353    break;
2354
2355
2356  case Bytecodes::_monitorenter:
2357    do_monitor_enter();
2358    break;
2359
2360  case Bytecodes::_monitorexit:
2361    do_monitor_exit();
2362    break;
2363
2364  case Bytecodes::_breakpoint:
2365    // A breakpoint was set concurrently with this compile
2366    // %%% use an uncommon trap?
2367    C->record_failure("breakpoint in method");
2368    return;
2369
2370  default:
2371#ifndef PRODUCT
2372    map()->dump(99);
2373#endif
2374    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2375    ShouldNotReachHere();
2376  }
2377
2378#ifndef PRODUCT
2379  IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2380  if (printer && printer->should_print(_method)) {
2381    char buffer[256];
2382    snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2383    bool old = printer->traverse_outs();
2384    printer->set_traverse_outs(true);
2385    printer->print_method(C, buffer, 4);
2386    printer->set_traverse_outs(old);
2387  }
2388#endif
2389}
2390