compile.cpp revision 4454:cc32ccaaf47f
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif


// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new (C) MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  // Binary search sorted list, in decreasing intervals [lo, hi].
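  // (Note: the midpoint below is computed as (uint)(hi + lo) / 2; doing the
  // divide in unsigned arithmetic guards against int overflow of hi + lo
  // on a very long intrinsics list.)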
  int lo = 0, hi = _intrinsics->length()-1;
  while (lo <= hi) {
    int mid = (uint)(hi + lo) / 2;
    ciMethod* mid_m = _intrinsics->at(mid)->method();
    if (m < mid_m) {
      hi = mid-1;
    } else if (m > mid_m) {
      lo = mid+1;
    } else {
      // look at minor sort key
      bool mid_virt = _intrinsics->at(mid)->is_virtual();
      if (is_virtual < mid_virt) {
        hi = mid-1;
      } else if (is_virtual > mid_virt) {
        lo = mid+1;
      } else {
        return mid;  // exact match
      }
    }
  }
  return lo;  // inexact match
}

void Compile::register_intrinsic(CallGenerator* cg) {
  if (_intrinsics == NULL) {
    _intrinsics = new (comp_arena())GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
  }
  // This code is stolen from ciObjectFactory::insert.
  // Really, GrowableArray should have methods for
  // insert_at, remove_at, and binary_search.
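  // (Until then, the insert below is a linear-time shuffle: append a copy
  // of the last element to grow the array by one, slide everything at or
  // above 'index' up a slot, then drop the new element into place.)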
  int len = _intrinsics->length();
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
  if (index == len) {
    _intrinsics->append(cg);
  } else {
#ifdef ASSERT
    CallGenerator* oldcg = _intrinsics->at(index);
    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
    _intrinsics->append(_intrinsics->at(len-1));
    int pos;
    for (pos = len-2; pos >= index; pos--) {
      _intrinsics->at_put(pos+1,_intrinsics->at(pos));
    }
    _intrinsics->at_put(index, cg);
  }
  assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}

CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics != NULL) {
    int index = intrinsic_insertion_index(m, is_virtual);
    if (index < _intrinsics->length()
        && _intrinsics->at(index)->method() == m
        && _intrinsics->at(index)->is_virtual() == is_virtual) {
      return _intrinsics->at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none &&
      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}

// Compile:: register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.


#ifndef PRODUCT
// statistics gathering...

juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};

bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[id];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[id] += 1);
    if (count == 1) {
      changed = true;           // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
  return changed;
}

static char* format_flags(int flags, char* buf) {
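  // Each set flag appends ",name" to buf; the leading comma is skipped by
  // returning &buf[1], so an empty flag set formats as "".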
  buf[0] = 0;
  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
  if (buf[0] == 0)  strcat(buf, ",");
  assert(buf[0] == ',', "must be");
  return &buf[1];
}

void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
    int   flags = _intrinsic_hist_flags[id];
    juint count = _intrinsic_hist_count[id];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}

void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    Scheduling::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif //PRODUCT

// Support for bundling info
Bundle* Compile::node_bundling(const Node *n) {
  assert(valid_bundle_info(n), "oob");
  return &_node_bundling_base[n->_idx];
}

bool Compile::valid_bundle_info(const Node *n) {
  return (_node_bundling_limit > n->_idx);
}


void Compile::gvn_replace_by(Node* n, Node* nn) {
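  // Walk n's uses from the last out-edge downward; after rewiring a use,
  // back the iterator up by the number of edges removed so the compacted
  // out-array is not over-stepped.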
  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
    Node* use = n->last_out(i);
    bool is_in_table = initial_gvn()->hash_delete(use);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == n) {
        if (j < use->req())
          use->set_req(j, nn);
        else
          use->set_prec(j, nn);
        uses_found++;
      }
    }
    if (is_in_table) {
      // reinsert into table
      initial_gvn()->hash_find_insert(use);
    }
    record_for_igvn(use);
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}


static inline bool not_a_node(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // killed by Node::destruct
  return false;
}

// Identify all nodes that are reachable from below (i.e., useful).
// Use a breadth-first pass that records state in a Unique_Node_List;
// a recursive traversal would be slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = unique();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth-first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if (not_a_node(m))  continue;
      useful.push(m);
    }
  }
}

// Update dead_node_list with any missing dead nodes using the useful
// list. Consider all non-useful nodes to be useless, i.e., dead nodes.
void Compile::update_dead_node_list(Unique_Node_List &useful) {
  uint max_idx = unique();
  VectorSet& useful_node_set = useful.member_set();

  for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
    // If node with index node_idx is not in useful set,
    // mark it as dead in dead node list.
    if (! useful_node_set.test(node_idx) ) {
      record_dead_node(node_idx);
    }
  }
}

void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
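  // Compact the list in place: entries whose call node is still useful
  // slide down over the dead ones, and the tail is truncated afterwards.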
  int shift = 0;
  for (int i = 0; i < inlines->length(); i++) {
    CallGenerator* cg = inlines->at(i);
    CallNode* call = cg->call_node();
    if (shift > 0) {
      inlines->at_put(i-shift, cg);
    }
    if (!useful.member(call)) {
      shift++;
    }
  }
  inlines->trunc_to(inlines->length()-shift);
}

// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
  uint next = 0;
  while (next < useful.size()) {
    Node *n = useful.at(next++);
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j) {
      Node* child = n->raw_out(j);
      if (! useful.member(child)) {
        assert(!child->is_top() || child != top(),
               "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      record_for_igvn(n->unique_out());
    }
  }
  // Remove useless macro and predicate opaq nodes
  for (int i = C->macro_count()-1; i >= 0; i--) {
    Node* n = C->macro_node(i);
    if (!useful.member(n)) {
      remove_macro_node(n);
    }
  }
  // Remove useless expensive nodes
  for (int i = C->expensive_count()-1; i >= 0; i--) {
    Node* n = C->expensive_node(i);
    if (!useful.member(n)) {
      remove_expensive_node(n);
    }
  }
  // clean up the late inline lists
  remove_useless_late_inlines(&_string_late_inlines, useful);
  remove_useless_late_inlines(&_late_inlines, useful);
  debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}

//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
int Compile::frame_size_in_words() const {
  // shift is 0 in LP32 and 1 in LP64
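  // (stack slots are 32-bit units, so e.g. a frame of 22 slots is 11 words
  // on LP64; the assert below catches an odd, misaligned slot count)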
  const int shift = (LogBytesPerWord - LogBytesPerInt);
  int words = _frame_slots >> shift;
  assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
  return words;
}

// ============================================================================
//------------------------------CompileWrapper---------------------------------
class CompileWrapper : public StackObj {
  Compile *const _compile;
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};
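// CompileWrapper is a stack-allocated RAII guard: the constructor installs
// the Compile* in the current ciEnv and the destructor tears it back down,
// so the pairing holds on every exit path from a compilation.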

CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
  // the Compile* pointer is stored in the current ciEnv:
  ciEnv* env = compile->env();
  assert(env == ciEnv::current(), "must already be a ciEnv active");
  assert(env->compiler_data() == NULL, "compile already active?");
  env->set_compiler_data(compile);
  assert(compile == Compile::current(), "sanity");

  compile->set_type_dict(NULL);
  compile->set_type_hwm(NULL);
  compile->set_type_last_size(0);
  compile->set_last_tf(NULL, NULL);
  compile->set_indexSet_arena(NULL);
  compile->set_indexSet_free_block_list(NULL);
  compile->init_type_arena();
  Type::Initialize(compile);
  _compile->set_scratch_buffer_blob(NULL);
  _compile->begin_method();
}
CompileWrapper::~CompileWrapper() {
  _compile->end_method();
  if (_compile->scratch_buffer_blob() != NULL)
    BufferBlob::free(_compile->scratch_buffer_blob());
  _compile->env()->set_compiler_data(NULL);
}


//----------------------------print_compile_messages---------------------------
void Compile::print_compile_messages() {
#ifndef PRODUCT
  // Check if recompiling
  if (_subsume_loads == false && PrintOpto) {
    // Recompiling without allowing machine instructions to subsume loads
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without subsuming loads          **");
    tty->print_cr("*********************************************************");
  }
  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
    // Recompiling without escape analysis
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without escape analysis          **");
    tty->print_cr("*********************************************************");
  }
  if (env()->break_at_compile()) {
    // Open the debugger when compiling this method.
    tty->print("### Breaking when compiling: ");
    method()->print_short_name();
    tty->cr();
    BREAKPOINT;
  }

  if( PrintOpto ) {
    if (is_osr_compilation()) {
      tty->print("[OSR]%3d", _compile_id);
    } else {
      tty->print("%3d", _compile_id);
    }
  }
#endif
}


//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void Compile::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it.  Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompileBroker disable further compilations.
      record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != NULL, "Initialize BufferBlob at start");
  assert(blob->size() > MAX_inst_size, "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->content_contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != NULL, "sanity");
  int lsize = MAX_locs_size / 3;
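  // Split the relocation scratch space into three equal shares, one per
  // CodeBuffer section (consts, insts, stubs).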
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label*   saveL = NULL;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  if (is_branch) {
    MacroAssembler masm(&buf);
    masm.bind(fakeL);
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
  n->emit(buf, this->regalloc());
  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}


// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )

// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.


Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis )
                : Phase(Compiler),
                  _env(ci_env),
                  _log(ci_env->log()),
                  _compile_id(ci_env->compile_id()),
                  _save_argument_registers(false),
                  _stub_name(NULL),
                  _stub_function(NULL),
                  _stub_entry_point(NULL),
                  _method(target),
                  _entry_bci(osr_bci),
                  _initial_gvn(NULL),
                  _for_igvn(NULL),
                  _warm_calls(NULL),
                  _subsume_loads(subsume_loads),
                  _do_escape_analysis(do_escape_analysis),
                  _failure_reason(NULL),
                  _code_buffer("Compile::Fill_buffer"),
                  _orig_pc_slot(0),
                  _orig_pc_slot_offset_in_bytes(0),
                  _has_method_handle_invokes(false),
                  _mach_constant_base_node(NULL),
                  _node_bundling_limit(0),
                  _node_bundling_base(NULL),
                  _java_calls(0),
                  _inner_loops(0),
                  _scratch_const_size(-1),
                  _in_scratch_emit_size(false),
                  _dead_node_list(comp_arena()),
                  _dead_node_count(0),
#ifndef PRODUCT
                  _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                  _printer(IdealGraphPrinter::printer()),
#endif
                  _congraph(NULL),
                  _late_inlines(comp_arena(), 2, 0, NULL),
                  _string_late_inlines(comp_arena(), 2, 0, NULL),
                  _late_inlines_pos(0),
                  _number_of_mh_late_inlines(0),
                  _inlining_progress(false),
                  _inlining_incrementally(false),
                  _print_inlining_list(NULL),
                  _print_inlining(0) {
  C = this;

  CompileWrapper cw(this);
#ifndef PRODUCT
  if (TimeCompiler2) {
    tty->print(" ");
    target->holder()->name()->print();
    tty->print(".");
    target->print_short_name();
    tty->print("  ");
  }
  TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
  TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
  bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
  if (!print_opto_assembly) {
    bool print_assembly = (PrintAssembly || _method->should_print_assembly());
    if (print_assembly && !Disassembler::can_decode()) {
      tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
      print_opto_assembly = true;
    }
  }
  set_print_assembly(print_opto_assembly);
  set_parsed_irreducible_loop(false);
#endif

  if (ProfileTraps) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    method()->ensure_method_data();
  }

  Init(::AliasLevel);


  print_compile_messages();

  if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
    _ilt = InlineTree::build_inline_tree_root();
  else
    _ilt = NULL;

  // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
  assert(num_alias_types() >= AliasIdxRaw, "");

#define MINIMUM_NODE_HASH  1023
  // Node list that Iterative GVN will start with
  Unique_Node_List for_igvn(comp_arena());
  set_for_igvn(&for_igvn);

  // GVN that will be run immediately on new nodes
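  // (Heuristic table sizing: roughly four Ideal nodes per bytecode plus
  // some slack, clamped below by MINIMUM_NODE_HASH.)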
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
  }
  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg = NULL;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new (this) StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new (this) StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
        // With java.lang.ref.Reference.get() we must go through the
        // intrinsic when G1 is enabled - even when get() is the root
        // method of the compile - so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre barrier. This will
        // result in the referent being marked live and the reference
        // object removed from the list of discovered references during
        // reference processing.
        cg = find_intrinsic(method(), false);
      }
      if (cg == NULL) {
        float past_uses = method()->interpreter_invocation_count();
        float expected_uses = past_uses;
        cg = CallGenerator::for_inline(method(), expected_uses);
      }
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable_all_tiers("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms)) == NULL) {
      record_method_not_compilable("method parse failed");
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");

    if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
      inline_string_calls(true);
    }

    if (failing())  return;

    print_method("Before RemoveUseless", 3);

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
  }

  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

  for (;;) {
    int successes = Inline_Warm();
    if (failing())  return;
    if (successes == 0)  break;
  }

  // Drain the list.
  Finish_Warm();
#ifndef PRODUCT
  if (_printer) {
    _printer->print_inlining(this);
  }
#endif

  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (PrintIdeal) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->head("ideal compile_id='%d'%s", compile_id(),
                 is_osr_compilation()    ? " compile_kind='osr'" :
                 "");
    }
    root()->dump(9999);
    if (xtty != NULL) {
      xtty->tail("ideal");
    }
  }
#endif

  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.

  _orig_pc_slot =  fixed_slots();
  int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
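  // (sizeof(address) / stack_slot_size is the number of 32-bit stack slots
  // the saved pc occupies: two on LP64, one on LP32.)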
  set_fixed_slots(next_slot);

  // Now generate code
  Code_Gen();
  if (failing())  return;

  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      record_method_not_compilable("+OptoNoExecute");  // Flag as failed
      return;
    }
    TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
#endif

    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci,
                           &_code_offsets,
                           _orig_pc_slot_offset_in_bytes,
                           code_buffer(),
                           frame_size_in_words(), _oop_map_set,
                           &_handler_table, &_inc_table,
                           compiler,
                           env()->comp_level(),
                           has_unsafe_access(),
                           SharedRuntime::is_wide_vector(max_vector_size())
                           );

    if (log() != NULL) // Print code cache state into compiler log
      log()->code_cache_state();
  }
}

//------------------------------Compile----------------------------------------
// Compile a runtime stub
Compile::Compile( ciEnv* ci_env,
                  TypeFunc_generator generator,
                  address stub_function,
                  const char *stub_name,
                  int is_fancy_jump,
                  bool pass_tls,
                  bool save_arg_registers,
                  bool return_pc )
  : Phase(Compiler),
    _env(ci_env),
    _log(ci_env->log()),
    _compile_id(0),
    _save_argument_registers(save_arg_registers),
    _method(NULL),
    _stub_name(stub_name),
    _stub_function(stub_function),
    _stub_entry_point(NULL),
    _entry_bci(InvocationEntryBci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _subsume_loads(true),
    _do_escape_analysis(false),
    _failure_reason(NULL),
    _code_buffer("Compile::Fill_buffer"),
    _has_method_handle_invokes(false),
    _mach_constant_base_node(NULL),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _java_calls(0),
    _inner_loops(0),
#ifndef PRODUCT
    _trace_opto_output(TraceOptoOutput),
    _printer(NULL),
#endif
    _dead_node_list(comp_arena()),
    _dead_node_count(0),
    _congraph(NULL),
    _number_of_mh_late_inlines(0),
    _inlining_progress(false),
    _inlining_incrementally(false),
    _print_inlining_list(NULL),
    _print_inlining(0) {
  C = this;

#ifndef PRODUCT
  TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
  TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
  set_print_assembly(PrintFrameConverterAssembly);
  set_parsed_irreducible_loop(false);
#endif
  CompileWrapper cw(this);
  Init(/*AliasLevel=*/ 0);
  init_tf((*generator)());

  {
    // The following is a dummy for the sake of GraphKit::gen_stub
    Unique_Node_List for_igvn(comp_arena());
    set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
    PhaseGVN gvn(Thread::current()->resource_area(),255);
    set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
    gvn.transform_no_reclaim(top());

    GraphKit kit;
    kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
  }

  NOT_PRODUCT( verify_graph_edges(); )
  Code_Gen();
  if (failing())  return;


  // Entry point will be accessed using compile->stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!failing()) {
      assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
                                                      _oop_map_set,
                                                      save_arg_registers);
      assert(rs != NULL && rs->is_runtime_stub(), "sanity check");

      _stub_entry_point = rs->entry_point();
    }
  }
}

//------------------------------Init-------------------------------------------
// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique  = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new (this) RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new (this) ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(env()->arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true; // start out assuming good things will happen
  set_has_unsafe_access(false);
  set_max_vector_size(0);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
  set_num_loop_opts(LoopOptsCount);
  set_do_inlining(Inline);
  set_max_inline_size(MaxInlineSize);
  set_freq_inline_size(FreqInlineSize);
  set_do_scheduling(OptoScheduling);
  set_do_count_invocations(false);
  set_do_method_data_update(false);

  if (debug_info()->recording_non_safepoints()) {
    set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                        (comp_arena(), 8, 0, NULL));
    set_default_node_notes(Node_Notes::make(this));
  }

  // // -- Initialize types before each compile --
  // // Update cached type information
  // if( _method && _method->constants() )
  //   Type::update_loaded_types(_method, _method->constants());

  // Init alias_type map.
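  // (AliasLevel, per its flag documentation: 0 uses a single memory slice,
  // 1 splits oop/static/field/array, 2 splits individual classes, and 3
  // adds unique slices for scalarizable allocations; level 3 is only sound
  // with escape analysis, hence the downgrade just below.)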
  if (!_do_escape_analysis && aliaslevel == 3)
    aliaslevel = 2;  // No unique types without escape analysis
  _AliasLevel = aliaslevel;
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  {
    for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
  }
  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;

  _intrinsics = NULL;
  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  register_library_intrinsics();
}

//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}

StartNode* Compile::start() const {
  assert(!failing(), "");
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if( start->is_Start() )
      return start->as_Start();
  }
  ShouldNotReachHere();
  return NULL;
}

//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
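// (i.e. the TypeFunc::Memory projection hanging off the StartNode, found
// once below and then cached in _immutable_memory)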
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}

#ifdef ASSERT
uint Compile::count_live_nodes_by_graph_walk() {
  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);
  return useful.size();
}

void Compile::print_missing_nodes() {

  // Return if CompileLog is NULL and PrintIdealNodeCount is false.
  if ((_log == NULL) && (! PrintIdealNodeCount)) {
    return;
  }

  // This is an expensive function. It is executed only when the user
  // specifies VerifyIdealNodeCount option or otherwise knows the
  // additional work that needs to be done to identify reachable nodes
  // by walking the flow graph and find the missing ones using
  // _dead_node_list.

  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);

  uint l_nodes = C->live_nodes();
  uint l_nodes_by_walk = useful.size();

  if (l_nodes != l_nodes_by_walk) {
    if (_log != NULL) {
      _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
      _log->stamp();
      _log->end_head();
    }
    VectorSet& useful_member_set = useful.member_set();
    int last_idx = l_nodes_by_walk;
    for (int i = 0; i < last_idx; i++) {
      if (useful_member_set.test(i)) {
        if (_dead_node_list.test(i)) {
          if (_log != NULL) {
            _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
          }
          if (PrintIdealNodeCount) {
            // Print the log message to tty
            tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i);
            useful.at(i)->dump();
          }
        }
      }
      else if (! _dead_node_list.test(i)) {
        if (_log != NULL) {
          _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
        }
        if (PrintIdealNodeCount) {
          // Print the log message to tty
          tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
        }
      }
    }
    if (_log != NULL) {
      _log->tail("mismatched_nodes");
    }
  }
}
#endif

#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif


///-------------------Managing Per-Node Debug & Profile Info-------------------

void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
  guarantee(arr != NULL, "");
  int num_blocks = arr->length();
  if (grow_by < num_blocks)  grow_by = num_blocks;
  int num_notes = grow_by * _node_notes_block_size;
  Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
  Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
  while (num_notes > 0) {
    arr->append(notes);
    notes     += _node_notes_block_size;
    num_notes -= _node_notes_block_size;
  }
  assert(num_notes == 0, "exact multiple, please");
}

bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes   = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}


//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of range checks by a
1254// single covering check that is at least as strong as any of them.
1255// If the optimization succeeds, the simplified (strengthened) range check
1256// will always succeed.  If it fails, we will deopt, and then give up
1257// on the optimization.
1258bool Compile::allow_range_check_smearing() const {
1259  // If this method has already thrown a range-check,
1260  // assume it was because we already tried range smearing
1261  // and it failed.
1262  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
1263  return !already_trapped;
1264}
1265
1266
1267//------------------------------flatten_alias_type-----------------------------
1268const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
1269  int offset = tj->offset();
1270  TypePtr::PTR ptr = tj->ptr();
1271
1272  // Known instance (scalarizable allocation) alias only with itself.
1273  bool is_known_inst = tj->isa_oopptr() != NULL &&
1274                       tj->is_oopptr()->is_known_instance();
1275
1276  // Process weird unsafe references.
1277  if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1278    assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
1279    assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1280    tj = TypeOopPtr::BOTTOM;
1281    ptr = tj->ptr();
1282    offset = tj->offset();
1283  }
1284
1285  // Array pointers need some flattening
1286  const TypeAryPtr *ta = tj->isa_aryptr();
1287  if( ta && is_known_inst ) {
1288    if ( offset != Type::OffsetBot &&
1289         offset > arrayOopDesc::length_offset_in_bytes() ) {
1290      offset = Type::OffsetBot; // Flatten constant access into array body only
1291      tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
1292    }
1293  } else if( ta && _AliasLevel >= 2 ) {
1294    // For arrays indexed by constant indices, we flatten the alias
1295    // space to include all of the array body.  Only the header, klass
1296    // and array length can be accessed un-aliased.
1297    if( offset != Type::OffsetBot ) {
1298      if( ta->const_oop() ) { // MethodData* or Method*
1299        offset = Type::OffsetBot;   // Flatten constant access into array body
1300        tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
1301      } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1302        // range is OK as-is.
1303        tj = ta = TypeAryPtr::RANGE;
1304      } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1305        tj = TypeInstPtr::KLASS; // all klass loads look alike
1306        ta = TypeAryPtr::RANGE; // generic ignored junk
1307        ptr = TypePtr::BotPTR;
1308      } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1309        tj = TypeInstPtr::MARK;
1310        ta = TypeAryPtr::RANGE; // generic ignored junk
1311        ptr = TypePtr::BotPTR;
1312      } else {                  // Random constant offset into array body
1313        offset = Type::OffsetBot;   // Flatten constant access into array body
1314        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
1315      }
1316    }
1317    // Arrays of fixed size alias with arrays of unknown size.
1318    if (ta->size() != TypeInt::POS) {
1319      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1320      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
1321    }
1322    // Arrays of known objects become arrays of unknown objects.
1323    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1324      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1325      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1326    }
1327    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1328      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1329      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
1330    }
1331    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1332    // cannot be distinguished by bytecode alone.
1333    if (ta->elem() == TypeInt::BOOL) {
1334      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1335      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1336      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1337    }
1338    // During the 2nd round of IterGVN, NotNull castings are removed.
1339    // Make sure the Bottom and NotNull variants alias the same.
1340    // Also, make sure exact and non-exact variants alias the same.
1341    if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
1342      tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
1343    }
1344  }
1345
1346  // Oop pointers need some flattening
1347  const TypeInstPtr *to = tj->isa_instptr();
1348  if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
1349    ciInstanceKlass *k = to->klass()->as_instance_klass();
1350    if( ptr == TypePtr::Constant ) {
1351      if (to->klass() != ciEnv::current()->Class_klass() ||
1352          offset < k->size_helper() * wordSize) {
1353        // No constant oop pointers (such as Strings); they alias with
1354        // unknown strings.
1355        assert(!is_known_inst, "not scalarizable allocation");
1356        tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1357      }
1358    } else if( is_known_inst ) {
1359      tj = to; // Keep NotNull and klass_is_exact for instance type
1360    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1361      // During the 2nd round of IterGVN, NotNull castings are removed.
1362      // Make sure the Bottom and NotNull variants alias the same.
1363      // Also, make sure exact and non-exact variants alias the same.
1364      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
1365    }
1366    // Canonicalize the holder of this field
1367    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1368      // First handle header references such as a LoadKlassNode, even if the
1369      // object's klass is unloaded at compile time (4965979).
1370      if (!is_known_inst) { // Do it only for non-instance types
1371        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
1372      }
1373    } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
1374      // Static fields are in the space above the normal instance
1375      // fields in the java.lang.Class instance.
1376      if (to->klass() != ciEnv::current()->Class_klass()) {
1377        to = NULL;
1378        tj = TypeOopPtr::BOTTOM;
1379        offset = tj->offset();
1380      }
1381    } else {
1382      ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
1383      if (!k->equals(canonical_holder) || tj->offset() != offset) {
1384        if( is_known_inst ) {
1385          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
1386        } else {
1387          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
1388        }
1389      }
1390    }
1391  }
1392
1393  // Klass pointers to object array klasses need some flattening
1394  const TypeKlassPtr *tk = tj->isa_klassptr();
1395  if( tk ) {
1396    // If we are referencing a field within a Klass, we need
1397    // to assume the worst case of an Object.  Both exact and
1398    // inexact types must flatten to the same alias class so
1399    // use NotNull as the PTR.
1400    if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1401
1402      tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
1403                                   TypeKlassPtr::OBJECT->klass(),
1404                                   offset);
1405    }
1406
1407    ciKlass* klass = tk->klass();
1408    if( klass->is_obj_array_klass() ) {
1409      ciKlass* k = TypeAryPtr::OOPS->klass();
1410      if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
1411        k = TypeInstPtr::BOTTOM->klass();
1412      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
1413    }
1414
1415    // Check for precise loads from the primary supertype array and force them
1416    // to the supertype cache alias index.  Check for generic array loads from
1417    // the primary supertype array and also force them to the supertype cache
1418    // alias index.  Since the same load can reach both, we need to merge
1419    // these 2 disparate memories into the same alias class.  Since the
1420    // primary supertype array is read-only, there's no chance of confusion
1421    // where we bypass an array load and an array store.
1422    int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1423    if (offset == Type::OffsetBot ||
1424        (offset >= primary_supers_offset &&
1425         offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1426        offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1427      offset = in_bytes(Klass::secondary_super_cache_offset());
1428      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
1429    }
1430  }
1431
1432  // Flatten all Raw pointers together.
1433  if (tj->base() == Type::RawPtr)
1434    tj = TypeRawPtr::BOTTOM;
1435
1436  if (tj->base() == Type::AnyPtr)
1437    tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
1438
1439  // Flatten all to bottom for now
1440  switch( _AliasLevel ) {
1441  case 0:
1442    tj = TypePtr::BOTTOM;
1443    break;
1444  case 1:                       // Flatten to: oop, static, field or array
1445    switch (tj->base()) {
1446    //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
1447    case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
1448    case Type::AryPtr:   // do not distinguish arrays at all
1449    case Type::InstPtr:  tj = TypeInstPtr::BOTTOM;  break;
1450    case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
1451    case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
1452    default: ShouldNotReachHere();
1453    }
1454    break;
1455  case 2:                       // No collapsing at level 2; keep all splits
1456  case 3:                       // No collapsing at level 3; keep all splits
1457    break;
1458  default:
1459    Unimplemented();
1460  }
1461
1462  offset = tj->offset();
1463  assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
1464
1465  assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
1466          (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
1467          (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
1468          (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
1469          (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1470          (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
1471          (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,
1472          "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
1473  assert( tj->ptr() != TypePtr::TopPTR &&
1474          tj->ptr() != TypePtr::AnyNull &&
1475          tj->ptr() != TypePtr::Null, "No imprecise addresses" );
1476//    assert( tj->ptr() != TypePtr::Constant ||
1477//            tj->base() == Type::RawPtr ||
1478//            tj->base() == Type::KlassPtr, "No constant oop addresses" );
1479
1480  return tj;
1481}
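
// Editor's note, a worked example of the flattening above: the variants
// "NotNull exact Foo+12", "NotNull Foo+12" and "BotPTR Foo+12" all flatten
// to the same "BotPTR Foo+12", so memory operations that differ only in
// nullness or exactness of the base share one alias class.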
1482
1483void Compile::AliasType::Init(int i, const TypePtr* at) {
1484  _index = i;
1485  _adr_type = at;
1486  _field = NULL;
1487  _is_rewritable = true; // default
1488  const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
1489  if (atoop != NULL && atoop->is_known_instance()) {
1490    const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
1491    _general_index = Compile::current()->get_alias_index(gt);
1492  } else {
1493    _general_index = 0;
1494  }
1495}
1496
1497//---------------------------------print_on------------------------------------
1498#ifndef PRODUCT
1499void Compile::AliasType::print_on(outputStream* st) {
1500  if (index() < 10)
1501        st->print("@ <%d> ", index());
1502  else  st->print("@ <%d>",  index());
1503  st->print(is_rewritable() ? "   " : " RO");
1504  int offset = adr_type()->offset();
1505  if (offset == Type::OffsetBot)
1506        st->print(" +any");
1507  else  st->print(" +%-3d", offset);
1508  st->print(" in ");
1509  adr_type()->dump_on(st);
1510  const TypeOopPtr* tjp = adr_type()->isa_oopptr();
1511  if (field() != NULL && tjp) {
1512    if (tjp->klass()  != field()->holder() ||
1513        tjp->offset() != field()->offset_in_bytes()) {
1514      st->print(" != ");
1515      field()->print();
1516      st->print(" ***");
1517    }
1518  }
1519}
1520
1521void print_alias_types() {
1522  Compile* C = Compile::current();
1523  tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
1524  for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
1525    C->alias_type(idx)->print_on(tty);
1526    tty->cr();
1527  }
1528}
1529#endif
1530
1531
1532//----------------------------probe_alias_cache--------------------------------
1533Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
1534  intptr_t key = (intptr_t) adr_type;
1535  key ^= key >> logAliasCacheSize;
1536  return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1537}
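
// Editor's sketch of the direct-mapped probe above, standalone (kLogSize and
// Entry are hypothetical stand-ins for logAliasCacheSize and AliasCacheEntry):
//
//   struct Entry { const void* _adr_type; int _index; };
//   static Entry table[1 << kLogSize];
//   static Entry* probe(const void* adr_type) {
//     intptr_t key = (intptr_t)adr_type;
//     key ^= key >> kLogSize;                       // fold high pointer bits down
//     return &table[key & ((1 << kLogSize) - 1)];   // mask to table size
//   }
//
// The xor-fold matters because arena-allocated types share their high address
// bits; without it most keys would collide in a few slots.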
1538
1539
1540//-----------------------------grow_alias_types--------------------------------
1541void Compile::grow_alias_types() {
1542  const int old_ats  = _max_alias_types; // how many before?
1543  const int new_ats  = old_ats;          // how many more?
1544  const int grow_ats = old_ats+new_ats;  // how many now?
1545  _max_alias_types = grow_ats;
1546  _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1547  AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1548  Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1549  for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1550}
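
// Editor's note: new_ats == old_ats, so each call doubles capacity.  A
// heap-based sketch of the same policy (the real code uses arena allocation
// and therefore never frees):
//
//   AliasType** grow(AliasType** arr, int& max) {
//     AliasType** bigger = new AliasType*[2 * max];
//     memcpy(bigger, arr, max * sizeof(AliasType*));  // keep existing entries
//     AliasType*  fresh  = new AliasType[max]();      // zeroed backing storage
//     for (int i = 0; i < max; i++)  bigger[max + i] = &fresh[i];
//     max *= 2;
//     return bigger;
//   }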
1551
1552
1553//--------------------------------find_alias_type------------------------------
1554Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1555  if (_AliasLevel == 0)
1556    return alias_type(AliasIdxBot);
1557
1558  AliasCacheEntry* ace = probe_alias_cache(adr_type);
1559  if (ace->_adr_type == adr_type) {
1560    return alias_type(ace->_index);
1561  }
1562
1563  // Handle special cases.
1564  if (adr_type == NULL)             return alias_type(AliasIdxTop);
1565  if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
1566
1567  // Do it the slow way.
1568  const TypePtr* flat = flatten_alias_type(adr_type);
1569
1570#ifdef ASSERT
1571  assert(flat == flatten_alias_type(flat), "idempotent");
1572  assert(flat != TypePtr::BOTTOM,     "cannot alias-analyze an untyped ptr");
1573  if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1574    const TypeOopPtr* foop = flat->is_oopptr();
1575    // Scalarizable allocations have exact klass always.
1576    bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1577    const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
1578    assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
1579  }
1580  assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
1581#endif
1582
1583  int idx = AliasIdxTop;
1584  for (int i = 0; i < num_alias_types(); i++) {
1585    if (alias_type(i)->adr_type() == flat) {
1586      idx = i;
1587      break;
1588    }
1589  }
1590
1591  if (idx == AliasIdxTop) {
1592    if (no_create)  return NULL;
1593    // Grow the array if necessary.
1594    if (_num_alias_types == _max_alias_types)  grow_alias_types();
1595    // Add a new alias type.
1596    idx = _num_alias_types++;
1597    _alias_types[idx]->Init(idx, flat);
1598    if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
1599    if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
1600    if (flat->isa_instptr()) {
1601      if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
1602          && flat->is_instptr()->klass() == env()->Class_klass())
1603        alias_type(idx)->set_rewritable(false);
1604    }
1605    if (flat->isa_klassptr()) {
1606      if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1607        alias_type(idx)->set_rewritable(false);
1608      if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
1609        alias_type(idx)->set_rewritable(false);
1610      if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1611        alias_type(idx)->set_rewritable(false);
1612      if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1613        alias_type(idx)->set_rewritable(false);
1614    }
1615    // %%% (We would like to finalize JavaThread::threadObj_offset(),
1616    // but the base pointer type is not distinctive enough to identify
1617    // references into JavaThread.)
1618
1619    // Check for final fields.
1620    const TypeInstPtr* tinst = flat->isa_instptr();
1621    if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1622      ciField* field;
1623      if (tinst->const_oop() != NULL &&
1624          tinst->klass() == ciEnv::current()->Class_klass() &&
1625          tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
1626        // static field
1627        ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1628        field = k->get_field_by_offset(tinst->offset(), true);
1629      } else {
1630        ciInstanceKlass *k = tinst->klass()->as_instance_klass();
1631        field = k->get_field_by_offset(tinst->offset(), false);
1632      }
1633      assert(field == NULL ||
1634             original_field == NULL ||
1635             (field->holder() == original_field->holder() &&
1636              field->offset() == original_field->offset() &&
1637              field->is_static() == original_field->is_static()), "wrong field?");
1638      // Set field() and is_rewritable() attributes.
1639      if (field != NULL)  alias_type(idx)->set_field(field);
1640    }
1641  }
1642
1643  // Fill the cache for next time.
1644  ace->_adr_type = adr_type;
1645  ace->_index    = idx;
1646  assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
1647
1648  // Might as well try to fill the cache for the flattened version, too.
1649  AliasCacheEntry* face = probe_alias_cache(flat);
1650  if (face->_adr_type == NULL) {
1651    face->_adr_type = flat;
1652    face->_index    = idx;
1653    assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1654  }
1655
1656  return alias_type(idx);
1657}
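
// Editor's note on the caching above: the unflattened adr_type and its
// flattened form are both installed with the same index, so a later lookup
// through either key hits the cache.  Illustrative use (flatten() stands in
// for the private flatten_alias_type()):
//
//   int idx = C->get_alias_index(at);              // creates AliasType on first use
//   assert(C->get_alias_index(flatten(at)) == idx, "one class per flat type");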
1658
1659
1660Compile::AliasType* Compile::alias_type(ciField* field) {
1661  const TypeOopPtr* t;
1662  if (field->is_static())
1663    t = TypeInstPtr::make(field->holder()->java_mirror());
1664  else
1665    t = TypeOopPtr::make_from_klass_raw(field->holder());
1666  AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1667  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
1668  return atp;
1669}
1670
1671
1672//------------------------------have_alias_type--------------------------------
1673bool Compile::have_alias_type(const TypePtr* adr_type) {
1674  AliasCacheEntry* ace = probe_alias_cache(adr_type);
1675  if (ace->_adr_type == adr_type) {
1676    return true;
1677  }
1678
1679  // Handle special cases.
1680  if (adr_type == NULL)             return true;
1681  if (adr_type == TypePtr::BOTTOM)  return true;
1682
1683  return find_alias_type(adr_type, true, NULL) != NULL;
1684}
1685
1686//-----------------------------must_alias--------------------------------------
1687// True if all values of the given address type are in the given alias category.
1688bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
1689  if (alias_idx == AliasIdxBot)         return true;  // the universal category
1690  if (adr_type == NULL)                 return true;  // NULL serves as TypePtr::TOP
1691  if (alias_idx == AliasIdxTop)         return false; // the empty category
1692  if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
1693
1694  // the only remaining possible overlap is identity
1695  int adr_idx = get_alias_index(adr_type);
1696  assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1697  assert(adr_idx == alias_idx ||
1698         (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
1699          && adr_type                       != TypeOopPtr::BOTTOM),
1700         "should not be testing for overlap with an unsafe pointer");
1701  return adr_idx == alias_idx;
1702}
1703
1704//------------------------------can_alias--------------------------------------
1705// True if any values of the given address type are in the given alias category.
1706bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
1707  if (alias_idx == AliasIdxTop)         return false; // the empty category
1708  if (adr_type == NULL)                 return false; // NULL serves as TypePtr::TOP
1709  if (alias_idx == AliasIdxBot)         return true;  // the universal category
1710  if (adr_type->base() == Type::AnyPtr) return true;  // TypePtr::BOTTOM or its twins
1711
1712  // the only remaining possible overlap is identity
1713  int adr_idx = get_alias_index(adr_type);
1714  assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1715  return adr_idx == alias_idx;
1716}
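
// Editor's summary of the two queries above; the checks apply in order:
//
//   must_alias: Bot idx -> true,   NULL adr -> true,   Top idx -> false,
//               AnyPtr adr -> false,  else  idx(adr) == alias_idx
//   can_alias:  Top idx -> false,  NULL adr -> false,  Bot idx -> true,
//               AnyPtr adr -> true,   else  idx(adr) == alias_idx
//
// That is, a NULL address type behaves like TypePtr::TOP (the empty set of
// values) and an AnyPtr base behaves like TypePtr::BOTTOM (all values).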
1717
1718
1719
1720//---------------------------pop_warm_call-------------------------------------
1721WarmCallInfo* Compile::pop_warm_call() {
1722  WarmCallInfo* wci = _warm_calls;
1723  if (wci != NULL)  _warm_calls = wci->remove_from(wci);
1724  return wci;
1725}
1726
1727//----------------------------Inline_Warm--------------------------------------
1728int Compile::Inline_Warm() {
1729  // If there is room, try to inline some more warm call sites.
1730  // %%% Do a graph index compaction pass when we think we're out of space?
1731  if (!InlineWarmCalls)  return 0;
1732
1733  int calls_made_hot = 0;
1734  int room_to_grow   = NodeCountInliningCutoff - unique();
1735  int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
1736  int amount_grown   = 0;
1737  WarmCallInfo* call;
1738  while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
1739    int est_size = (int)call->size();
1740    if (est_size > (room_to_grow - amount_grown)) {
1741      // This one won't fit anyway.  Get rid of it.
1742      call->make_cold();
1743      continue;
1744    }
1745    call->make_hot();
1746    calls_made_hot++;
1747    amount_grown   += est_size;
1748    amount_to_grow -= est_size;
1749  }
1750
1751  if (calls_made_hot > 0)  set_major_progress();
1752  return calls_made_hot;
1753}
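
// Editor's note: two budgets interact above.  room_to_grow is the hard cap
// (NodeCountInliningCutoff - unique()); amount_to_grow is the per-pass step,
// at most NodeCountInliningStep.  A call is made cold only when it can no
// longer fit under the hard cap:
//
//   if (est_size > room_to_grow - amount_grown)  call->make_cold();  // never fits
//   else                                         call->make_hot();   // charge budget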
1754
1755
1756//----------------------------Finish_Warm--------------------------------------
1757void Compile::Finish_Warm() {
1758  if (!InlineWarmCalls)  return;
1759  if (failing())  return;
1760  if (warm_calls() == NULL)  return;
1761
1762  // Clean up loose ends if we are out of space for inlining.
1763  WarmCallInfo* call;
1764  while ((call = pop_warm_call()) != NULL) {
1765    call->make_cold();
1766  }
1767}
1768
1769//---------------------cleanup_loop_predicates-----------------------
1770// Remove the opaque nodes that protect the predicates so that all unused
1771// checks and uncommon_traps will be eliminated from the ideal graph
1772void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
1773  if (predicate_count()==0) return;
1774  for (int i = predicate_count(); i > 0; i--) {
1775    Node * n = predicate_opaque1_node(i-1);
1776    assert(n->Opcode() == Op_Opaque1, "must be");
1777    igvn.replace_node(n, n->in(1));
1778  }
1779  assert(predicate_count()==0, "should be clean!");
1780}
1781
1782// StringOpts and late inlining of string methods
1783void Compile::inline_string_calls(bool parse_time) {
1784  {
1785    // remove useless nodes to make the usage analysis simpler
1786    ResourceMark rm;
1787    PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1788  }
1789
1790  {
1791    ResourceMark rm;
1792    print_method("Before StringOpts", 3);
1793    PhaseStringOpts pso(initial_gvn(), for_igvn());
1794    print_method("After StringOpts", 3);
1795  }
1796
1797  // now inline anything that we skipped the first time around
1798  if (!parse_time) {
1799    _late_inlines_pos = _late_inlines.length();
1800  }
1801
1802  while (_string_late_inlines.length() > 0) {
1803    CallGenerator* cg = _string_late_inlines.pop();
1804    cg->do_late_inline();
1805    if (failing())  return;
1806  }
1807  _string_late_inlines.trunc_to(0);
1808}
1809
1810void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
1811  assert(IncrementalInline, "incremental inlining should be on");
1812  PhaseGVN* gvn = initial_gvn();
1813
1814  set_inlining_progress(false);
1815  for_igvn()->clear();
1816  gvn->replace_with(&igvn);
1817
1818  int i = 0;
1819
1820  for (; i <_late_inlines.length() && !inlining_progress(); i++) {
1821    CallGenerator* cg = _late_inlines.at(i);
1822    _late_inlines_pos = i+1;
1823    cg->do_late_inline();
1824    if (failing())  return;
1825  }
1826  int j = 0;
1827  for (; i < _late_inlines.length(); i++, j++) {
1828    _late_inlines.at_put(j, _late_inlines.at(i));
1829  }
1830  _late_inlines.trunc_to(j);
1831
1832  {
1833    ResourceMark rm;
1834    PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
1835  }
1836
1837  igvn = PhaseIterGVN(gvn);
1838}
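
// Editor's note on the i/j loop above: entries [0, i) have been attempted
// (the last one made progress), so the untried tail is compacted to the
// front and the list truncated.  E.g. with 5 candidates and progress on the
// 2nd, entries 2..4 move to slots 0..2 and the length becomes 3.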
1839
1840// Perform incremental inlining until the bound on the number of live nodes is reached.
1841void Compile::inline_incrementally(PhaseIterGVN& igvn) {
1842  PhaseGVN* gvn = initial_gvn();
1843
1844  set_inlining_incrementally(true);
1845  set_inlining_progress(true);
1846  uint low_live_nodes = 0;
1847
1848  while(inlining_progress() && _late_inlines.length() > 0) {
1849
1850    if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1851      if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
1852        // PhaseIdealLoop is expensive, so we only try it once we run
1853        // out of live-node headroom, and we only try it again if the
1854        // previous pass got the number of nodes down significantly.
1855        PhaseIdealLoop ideal_loop( igvn, false, true );
1856        if (failing())  return;
1857        low_live_nodes = live_nodes();
1858        _major_progress = true;
1859      }
1860
1861      if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
1862        break;
1863      }
1864    }
1865
1866    inline_incrementally_one(igvn);
1867
1868    if (failing())  return;
1869
1870    igvn.optimize();
1871
1872    if (failing())  return;
1873  }
1874
1875  assert( igvn._worklist.size() == 0, "should be done with igvn" );
1876
1877  if (_string_late_inlines.length() > 0) {
1878    assert(has_stringbuilder(), "inconsistent");
1879    for_igvn()->clear();
1880    initial_gvn()->replace_with(&igvn);
1881
1882    inline_string_calls(false);
1883
1884    if (failing())  return;
1885
1886    {
1887      ResourceMark rm;
1888      PhaseRemoveUseless pru(initial_gvn(), for_igvn());
1889    }
1890
1891    igvn = PhaseIterGVN(gvn);
1892
1893    igvn.optimize();
1894  }
1895
1896  set_inlining_incrementally(false);
1897}
1898
1899
1900//------------------------------Optimize---------------------------------------
1901// Given a graph, optimize it.
1902void Compile::Optimize() {
1903  TracePhase t1("optimizer", &_t_optimizer, true);
1904
1905#ifndef PRODUCT
1906  if (env()->break_at_compile()) {
1907    BREAKPOINT;
1908  }
1909
1910#endif
1911
1912  ResourceMark rm;
1913  int          loop_opts_cnt;
1914
1915  NOT_PRODUCT( verify_graph_edges(); )
1916
1917  print_method("After Parsing");
1918
1919 {
1920  // Iterative Global Value Numbering, including ideal transforms
1921  // Initialize IterGVN with types and values from parse-time GVN
1922  PhaseIterGVN igvn(initial_gvn());
1923  {
1924    NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
1925    igvn.optimize();
1926  }
1927
1928  print_method("Iter GVN 1", 2);
1929
1930  if (failing())  return;
1931
1932  inline_incrementally(igvn);
1933
1934  print_method("Incremental Inline", 2);
1935
1936  if (failing())  return;
1937
1938  // No more new expensive nodes will be added to the list from here
1939  // so keep only the actual candidates for optimizations.
1940  cleanup_expensive_nodes(igvn);
1941
1942  // Perform escape analysis
1943  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
1944    if (has_loops()) {
1945      // Cleanup graph (remove dead nodes).
1946      TracePhase t2("idealLoop", &_t_idealLoop, true);
1947      PhaseIdealLoop ideal_loop( igvn, false, true );
1948      if (major_progress()) print_method("PhaseIdealLoop before EA", 2);
1949      if (failing())  return;
1950    }
1951    ConnectionGraph::do_analysis(this, &igvn);
1952
1953    if (failing())  return;
1954
1955    // Optimize out field loads from scalar replaceable allocations.
1956    igvn.optimize();
1957    print_method("Iter GVN after EA", 2);
1958
1959    if (failing())  return;
1960
1961    if (congraph() != NULL && macro_count() > 0) {
1962      NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
1963      PhaseMacroExpand mexp(igvn);
1964      mexp.eliminate_macro_nodes();
1965      igvn.set_delay_transform(false);
1966
1967      igvn.optimize();
1968      print_method("Iter GVN after eliminating allocations and locks", 2);
1969
1970      if (failing())  return;
1971    }
1972  }
1973
1974  // Loop transforms on the ideal graph.  Range Check Elimination,
1975  // peeling, unrolling, etc.
1976
1977  // Set loop opts counter
1978  loop_opts_cnt = num_loop_opts();
1979  if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
1980    {
1981      TracePhase t2("idealLoop", &_t_idealLoop, true);
1982      PhaseIdealLoop ideal_loop( igvn, true );
1983      loop_opts_cnt--;
1984      if (major_progress()) print_method("PhaseIdealLoop 1", 2);
1985      if (failing())  return;
1986    }
1987    // Loop opts pass if partial peeling occurred in previous pass
1988    if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
1989      TracePhase t3("idealLoop", &_t_idealLoop, true);
1990      PhaseIdealLoop ideal_loop( igvn, false );
1991      loop_opts_cnt--;
1992      if (major_progress()) print_method("PhaseIdealLoop 2", 2);
1993      if (failing())  return;
1994    }
1995    // Loop opts pass for loop-unrolling before CCP
1996    if(major_progress() && (loop_opts_cnt > 0)) {
1997      TracePhase t4("idealLoop", &_t_idealLoop, true);
1998      PhaseIdealLoop ideal_loop( igvn, false );
1999      loop_opts_cnt--;
2000      if (major_progress()) print_method("PhaseIdealLoop 3", 2);
2001    }
2002    if (!failing()) {
2003      // Verify that last round of loop opts produced a valid graph
2004      NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2005      PhaseIdealLoop::verify(igvn);
2006    }
2007  }
2008  if (failing())  return;
2009
2010  // Conditional Constant Propagation.
2011  PhaseCCP ccp( &igvn );
2012  assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
2013  {
2014    TracePhase t2("ccp", &_t_ccp, true);
2015    ccp.do_transform();
2016  }
2017  print_method("PhaseCCP 1", 2);
2018
2019  assert( true, "Break here to ccp.dump_old2new_map()");
2020
2021  // Iterative Global Value Numbering, including ideal transforms
2022  {
2023    NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
2024    igvn = ccp;
2025    igvn.optimize();
2026  }
2027
2028  print_method("Iter GVN 2", 2);
2029
2030  if (failing())  return;
2031
2032  // Loop transforms on the ideal graph.  Range Check Elimination,
2033  // peeling, unrolling, etc.
2034  if(loop_opts_cnt > 0) {
2035    debug_only( int cnt = 0; );
2036    while(major_progress() && (loop_opts_cnt > 0)) {
2037      TracePhase t2("idealLoop", &_t_idealLoop, true);
2038      assert( cnt++ < 40, "infinite cycle in loop optimization" );
2039      PhaseIdealLoop ideal_loop( igvn, true);
2040      loop_opts_cnt--;
2041      if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
2042      if (failing())  return;
2043    }
2044  }
2045
2046  {
2047    // Verify that all previous optimizations produced a valid graph
2048    // at least to this point, even if no loop optimizations were done.
2049    NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
2050    PhaseIdealLoop::verify(igvn);
2051  }
2052
2053  {
2054    NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
2055    PhaseMacroExpand  mex(igvn);
2056    if (mex.expand_macro_nodes()) {
2057      assert(failing(), "must bail out w/ explicit message");
2058      return;
2059    }
2060  }
2061
2062 } // (End scope of igvn; run destructor if necessary for asserts.)
2063
2064  dump_inlining();
2065  // A method with only infinite loops has no edges entering loops from root
2066  {
2067    NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
2068    if (final_graph_reshaping()) {
2069      assert(failing(), "must bail out w/ explicit message");
2070      return;
2071    }
2072  }
2073
2074  print_method("Optimize finished", 2);
2075}
2076
2077
2078//------------------------------Code_Gen---------------------------------------
2079// Given a graph, generate code for it
2080void Compile::Code_Gen() {
2081  if (failing())  return;
2082
2083  // Perform instruction selection.  You might think we could reclaim Matcher
2084  // memory PDQ, but actually the Matcher is used in generating spill code.
2085  // Internals of the Matcher (including some VectorSets) must remain live
2086  // for awhile - thus I cannot reclaim Matcher memory lest a VectorSet usage
2087  // set a bit in reclaimed memory.
2088
2089  // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2090  // nodes.  Mapping is only valid at the root of each matched subtree.
2091  NOT_PRODUCT( verify_graph_edges(); )
2092
2093  Node_List proj_list;
2094  Matcher m(proj_list);
2095  _matcher = &m;
2096  {
2097    TracePhase t2("matcher", &_t_matcher, true);
2098    m.match();
2099  }
2100  // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
2101  // nodes.  Mapping is only valid at the root of each matched subtree.
2102  NOT_PRODUCT( verify_graph_edges(); )
2103
2104  // If you have too many nodes, or if matching has failed, bail out
2105  check_node_count(0, "out of nodes matching instructions");
2106  if (failing())  return;
2107
2108  // Build a proper-looking CFG
2109  PhaseCFG cfg(node_arena(), root(), m);
2110  _cfg = &cfg;
2111  {
2112    NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
2113    cfg.Dominators();
2114    if (failing())  return;
2115
2116    NOT_PRODUCT( verify_graph_edges(); )
2117
2118    cfg.Estimate_Block_Frequency();
2119    cfg.GlobalCodeMotion(m,unique(),proj_list);
2120    if (failing())  return;
2121
2122    print_method("Global code motion", 2);
2123
2124    NOT_PRODUCT( verify_graph_edges(); )
2125
2126    debug_only( cfg.verify(); )
2127  }
2128  NOT_PRODUCT( verify_graph_edges(); )
2129
2130  PhaseChaitin regalloc(unique(),cfg,m);
2131  _regalloc = &regalloc;
2132  {
2133    TracePhase t2("regalloc", &_t_registerAllocation, true);
2134    // Perform any platform dependent preallocation actions.  This is used,
2135    // for example, to avoid taking an implicit null pointer exception
2136    // using the frame pointer on win95.
2137    _regalloc->pd_preallocate_hook();
2138
2139    // Perform register allocation.  After Chaitin, use-def chains are
2140    // no longer accurate (at spill code) and so must be ignored.
2141    // Node->LRG->reg mappings are still accurate.
2142    _regalloc->Register_Allocate();
2143
2144    // Bail out if the allocator builds too many nodes
2145    if (failing())  return;
2146  }
2147
2148  // Prior to register allocation we kept empty basic blocks in case
2149  // the allocator needed a place to spill.  After register allocation we
2150  // are not adding any new instructions.  If any basic block is empty, we
2151  // can now safely remove it.
2152  {
2153    NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
2154    cfg.remove_empty();
2155    if (do_freq_based_layout()) {
2156      PhaseBlockLayout layout(cfg);
2157    } else {
2158      cfg.set_loop_alignment();
2159    }
2160    cfg.fixup_flow();
2161  }
2162
2163  // Perform any platform dependent postallocation verifications.
2164  debug_only( _regalloc->pd_postallocate_verify_hook(); )
2165
2166  // Apply peephole optimizations
2167  if( OptoPeephole ) {
2168    NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
2169    PhasePeephole peep( _regalloc, cfg);
2170    peep.do_transform();
2171  }
2172
2173  // Convert Nodes to instruction bits in a buffer
2174  {
2175    // %%%% workspace merge brought two timers together for one job
2176    TracePhase t2a("output", &_t_output, true);
2177    NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
2178    Output();
2179  }
2180
2181  print_method("Final Code");
2182
2183  // He's dead, Jim.
2184  _cfg     = (PhaseCFG*)0xdeadbeef;
2185  _regalloc = (PhaseChaitin*)0xdeadbeef;
2186}
2187
2188
2189//------------------------------dump_asm---------------------------------------
2190// Dump formatted assembly
2191#ifndef PRODUCT
2192void Compile::dump_asm(int *pcs, uint pc_limit) {
2193  bool cut_short = false;
2194  tty->print_cr("#");
2195  tty->print("#  ");  _tf->dump();  tty->cr();
2196  tty->print_cr("#");
2197
2198  // For all blocks
2199  int pc = 0x0;                 // Program counter
2200  char starts_bundle = ' ';
2201  _regalloc->dump_frame();
2202
2203  Node *n = NULL;
2204  for( uint i=0; i<_cfg->_num_blocks; i++ ) {
2205    if (VMThread::should_terminate()) { cut_short = true; break; }
2206    Block *b = _cfg->_blocks[i];
2207    if (b->is_connector() && !Verbose) continue;
2208    n = b->_nodes[0];
2209    if (pcs && n->_idx < pc_limit)
2210      tty->print("%3.3x   ", pcs[n->_idx]);
2211    else
2212      tty->print("      ");
2213    b->dump_head( &_cfg->_bbs );
2214    if (b->is_connector()) {
2215      tty->print_cr("        # Empty connector block");
2216    } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
2217      tty->print_cr("        # Block is sole successor of call");
2218    }
2219
2220    // For all instructions
2221    Node *delay = NULL;
2222    for( uint j = 0; j<b->_nodes.size(); j++ ) {
2223      if (VMThread::should_terminate()) { cut_short = true; break; }
2224      n = b->_nodes[j];
2225      if (valid_bundle_info(n)) {
2226        Bundle *bundle = node_bundling(n);
2227        if (bundle->used_in_unconditional_delay()) {
2228          delay = n;
2229          continue;
2230        }
2231        if (bundle->starts_bundle())
2232          starts_bundle = '+';
2233      }
2234
2235      if (WizardMode) n->dump();
2236
2237      if( !n->is_Region() &&    // Don't print in the Assembly
2238          !n->is_Phi() &&       // a few noisily useless nodes
2239          !n->is_Proj() &&
2240          !n->is_MachTemp() &&
2241          !n->is_SafePointScalarObject() &&
2242          !n->is_Catch() &&     // Would be nice to print exception table targets
2243          !n->is_MergeMem() &&  // Not very interesting
2244          !n->is_top() &&       // Debug info table constants
2245          !(n->is_Con() && !n->is_Mach()) // Debug info table constants
2246          ) {
2247        if (pcs && n->_idx < pc_limit)
2248          tty->print("%3.3x", pcs[n->_idx]);
2249        else
2250          tty->print("   ");
2251        tty->print(" %c ", starts_bundle);
2252        starts_bundle = ' ';
2253        tty->print("\t");
2254        n->format(_regalloc, tty);
2255        tty->cr();
2256      }
2257
2258      // If we have an instruction with a delay slot, and have seen a delay,
2259      // then back up and print it
2260      if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
2261        assert(delay != NULL, "no unconditional delay instruction");
2262        if (WizardMode) delay->dump();
2263
2264        if (node_bundling(delay)->starts_bundle())
2265          starts_bundle = '+';
2266        if (pcs && n->_idx < pc_limit)
2267          tty->print("%3.3x", pcs[n->_idx]);
2268        else
2269          tty->print("   ");
2270        tty->print(" %c ", starts_bundle);
2271        starts_bundle = ' ';
2272        tty->print("\t");
2273        delay->format(_regalloc, tty);
2274        tty->print_cr("");
2275        delay = NULL;
2276      }
2277
2278      // Dump the exception table as well
2279      if( n->is_Catch() && (Verbose || WizardMode) ) {
2280        // Print the exception table for this offset
2281        _handler_table.print_subtable_for(pc);
2282      }
2283    }
2284
2285    if (pcs && n->_idx < pc_limit)
2286      tty->print_cr("%3.3x", pcs[n->_idx]);
2287    else
2288      tty->print_cr("");
2289
2290    assert(cut_short || delay == NULL, "no unconditional delay branch");
2291
2292  } // End of per-block dump
2293  tty->print_cr("");
2294
2295  if (cut_short)  tty->print_cr("*** disassembly is cut short ***");
2296}
2297#endif
2298
2299//------------------------------Final_Reshape_Counts---------------------------
2300// This class defines counters to help identify when a method
2301// may/must be executed using hardware with only 24-bit precision.
2302struct Final_Reshape_Counts : public StackObj {
2303  int  _call_count;             // count non-inlined 'common' calls
2304  int  _float_count;            // count float ops requiring 24-bit precision
2305  int  _double_count;           // count double ops requiring more precision
2306  int  _java_call_count;        // count non-inlined 'java' calls
2307  int  _inner_loop_count;       // count loops which need alignment
2308  VectorSet _visited;           // Visitation flags
2309  Node_List _tests;             // Set of IfNodes & PCTableNodes
2310
2311  Final_Reshape_Counts() :
2312    _call_count(0), _float_count(0), _double_count(0),
2313    _java_call_count(0), _inner_loop_count(0),
2314    _visited( Thread::current()->resource_area() ) { }
2315
2316  void inc_call_count  () { _call_count  ++; }
2317  void inc_float_count () { _float_count ++; }
2318  void inc_double_count() { _double_count++; }
2319  void inc_java_call_count() { _java_call_count++; }
2320  void inc_inner_loop_count() { _inner_loop_count++; }
2321
2322  int  get_call_count  () const { return _call_count  ; }
2323  int  get_float_count () const { return _float_count ; }
2324  int  get_double_count() const { return _double_count; }
2325  int  get_java_call_count() const { return _java_call_count; }
2326  int  get_inner_loop_count() const { return _inner_loop_count; }
2327};
2328
2329#ifdef ASSERT
2330static bool oop_offset_is_sane(const TypeInstPtr* tp) {
2331  ciInstanceKlass *k = tp->klass()->as_instance_klass();
2332  // Make sure the offset goes inside the instance layout.
2333  return k->contains_field_offset(tp->offset());
2334  // Note that OffsetBot and OffsetTop are very negative.
2335}
2336#endif
2337
2338// Eliminate trivially redundant StoreCMs and accumulate their
2339// precedence edges.
2340void Compile::eliminate_redundant_card_marks(Node* n) {
2341  assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
2342  if (n->in(MemNode::Address)->outcnt() > 1) {
2343    // There are multiple users of the same address so it might be
2344    // possible to eliminate some of the StoreCMs
2345    Node* mem = n->in(MemNode::Memory);
2346    Node* adr = n->in(MemNode::Address);
2347    Node* val = n->in(MemNode::ValueIn);
2348    Node* prev = n;
2349    bool done = false;
2350    // Walk the chain of StoreCMs eliminating ones that match.  As
2351    // long as it's a chain of single users then the optimization is
2352    // safe.  Eliminating partially redundant StoreCMs would require
2353    // cloning copies down the other paths.
2354    while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
2355      if (adr == mem->in(MemNode::Address) &&
2356          val == mem->in(MemNode::ValueIn)) {
2357        // redundant StoreCM
2358        if (mem->req() > MemNode::OopStore) {
2359          // Hasn't been processed by this code yet.
2360          n->add_prec(mem->in(MemNode::OopStore));
2361        } else {
2362          // Already converted to precedence edge
2363          for (uint i = mem->req(); i < mem->len(); i++) {
2364            // Accumulate any precedence edges
2365            if (mem->in(i) != NULL) {
2366              n->add_prec(mem->in(i));
2367            }
2368          }
2369          // Everything above this point has been processed.
2370          done = true;
2371        }
2372        // Eliminate the previous StoreCM
2373        prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
2374        assert(mem->outcnt() == 0, "should be dead");
2375        mem->disconnect_inputs(NULL, this);
2376      } else {
2377        prev = mem;
2378      }
2379      mem = prev->in(MemNode::Memory);
2380    }
2381  }
2382}
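
// Editor's sketch of the shape of the transformation above.  Before, each
// oop store to the same card has its own mark:
//
//   n:StoreCM(adr,val) -mem-> StoreCM(adr,val) -mem-> StoreCM(adr,val) -mem-> M
//
// After, the matching predecessors are spliced out of the memory chain and n
// survives, carrying their oop stores as precedence edges:
//
//   n:StoreCM(adr,val) -mem-> M     prec: { eliminated oop stores }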
2383
2384//------------------------------final_graph_reshaping_impl----------------------
2385// Implement items 1-5 from final_graph_reshaping below.
2386void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
2387
2388  if ( n->outcnt() == 0 ) return; // dead node
2389  uint nop = n->Opcode();
2390
2391  // Check for 2-input instruction with "last use" on right input.
2392  // Swap to left input.  Implements item (2).
2393  if( n->req() == 3 &&          // two-input instruction
2394      n->in(1)->outcnt() > 1 && // left use is NOT a last use
2395      (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
2396      n->in(2)->outcnt() == 1 &&// right use IS a last use
2397      !n->in(2)->is_Con() ) {   // right use is not a constant
2398    // Check for commutative opcode
2399    switch( nop ) {
2400    case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
2401    case Op_MaxI:  case Op_MinI:
2402    case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
2403    case Op_AndL:  case Op_XorL:  case Op_OrL:
2404    case Op_AndI:  case Op_XorI:  case Op_OrI: {
2405      // Move "last use" input to left by swapping inputs
2406      n->swap_edges(1, 2);
2407      break;
2408    }
2409    default:
2410      break;
2411    }
2412  }
2413
2414#ifdef ASSERT
2415  if( n->is_Mem() ) {
2416    int alias_idx = get_alias_index(n->as_Mem()->adr_type());
2417    assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
2418            // oop will be recorded in oop map if load crosses safepoint
2419            n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
2420                             LoadNode::is_immutable_value(n->in(MemNode::Address))),
2421            "raw memory operations should have control edge");
2422  }
2423#endif
2424  // Count FPU ops and common calls, implements item (3)
2425  switch( nop ) {
2426  // Count all float operations that may use FPU
2427  case Op_AddF:
2428  case Op_SubF:
2429  case Op_MulF:
2430  case Op_DivF:
2431  case Op_NegF:
2432  case Op_ModF:
2433  case Op_ConvI2F:
2434  case Op_ConF:
2435  case Op_CmpF:
2436  case Op_CmpF3:
2437  // case Op_ConvL2F: // longs are split into 32-bit halves
2438    frc.inc_float_count();
2439    break;
2440
2441  case Op_ConvF2D:
2442  case Op_ConvD2F:
2443    frc.inc_float_count();
2444    frc.inc_double_count();
2445    break;
2446
2447  // Count all double operations that may use FPU
2448  case Op_AddD:
2449  case Op_SubD:
2450  case Op_MulD:
2451  case Op_DivD:
2452  case Op_NegD:
2453  case Op_ModD:
2454  case Op_ConvI2D:
2455  case Op_ConvD2I:
2456  // case Op_ConvL2D: // handled by leaf call
2457  // case Op_ConvD2L: // handled by leaf call
2458  case Op_ConD:
2459  case Op_CmpD:
2460  case Op_CmpD3:
2461    frc.inc_double_count();
2462    break;
2463  case Op_Opaque1:              // Remove Opaque Nodes before matching
2464  case Op_Opaque2:              // Remove Opaque Nodes before matching
2465    n->subsume_by(n->in(1), this);
2466    break;
2467  case Op_CallStaticJava:
2468  case Op_CallJava:
2469  case Op_CallDynamicJava:
2470    frc.inc_java_call_count(); // Count java call site;
2471  case Op_CallRuntime:
2472  case Op_CallLeaf:
2473  case Op_CallLeafNoFP: {
2474    assert( n->is_Call(), "" );
2475    CallNode *call = n->as_Call();
2476    // Count call sites where the FP mode bit would have to be flipped.
2477    // Do not count uncommon runtime calls:
2478    // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
2479    // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
2480    if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
2481      frc.inc_call_count();   // Count the call site
2482    } else {                  // See if uncommon argument is shared
2483      Node *n = call->in(TypeFunc::Parms);
2484      int nop = n->Opcode();
2485      // Clone shared simple arguments to uncommon calls, item (1).
2486      if( n->outcnt() > 1 &&
2487          !n->is_Proj() &&
2488          nop != Op_CreateEx &&
2489          nop != Op_CheckCastPP &&
2490          nop != Op_DecodeN &&
2491          nop != Op_DecodeNKlass &&
2492          !n->is_Mem() ) {
2493        Node *x = n->clone();
2494        call->set_req( TypeFunc::Parms, x );
2495      }
2496    }
2497    break;
2498  }
2499
2500  case Op_StoreD:
2501  case Op_LoadD:
2502  case Op_LoadD_unaligned:
2503    frc.inc_double_count();
2504    goto handle_mem;
2505  case Op_StoreF:
2506  case Op_LoadF:
2507    frc.inc_float_count();
2508    goto handle_mem;
2509
2510  case Op_StoreCM:
2511    {
2512      // Convert OopStore dependence into precedence edge
2513      Node* prec = n->in(MemNode::OopStore);
2514      n->del_req(MemNode::OopStore);
2515      n->add_prec(prec);
2516      eliminate_redundant_card_marks(n);
2517    }
2518
2519    // fall through
2520
2521  case Op_StoreB:
2522  case Op_StoreC:
2523  case Op_StorePConditional:
2524  case Op_StoreI:
2525  case Op_StoreL:
2526  case Op_StoreIConditional:
2527  case Op_StoreLConditional:
2528  case Op_CompareAndSwapI:
2529  case Op_CompareAndSwapL:
2530  case Op_CompareAndSwapP:
2531  case Op_CompareAndSwapN:
2532  case Op_GetAndAddI:
2533  case Op_GetAndAddL:
2534  case Op_GetAndSetI:
2535  case Op_GetAndSetL:
2536  case Op_GetAndSetP:
2537  case Op_GetAndSetN:
2538  case Op_StoreP:
2539  case Op_StoreN:
2540  case Op_StoreNKlass:
2541  case Op_LoadB:
2542  case Op_LoadUB:
2543  case Op_LoadUS:
2544  case Op_LoadI:
2545  case Op_LoadKlass:
2546  case Op_LoadNKlass:
2547  case Op_LoadL:
2548  case Op_LoadL_unaligned:
2549  case Op_LoadPLocked:
2550  case Op_LoadP:
2551  case Op_LoadN:
2552  case Op_LoadRange:
2553  case Op_LoadS: {
2554  handle_mem:
2555#ifdef ASSERT
2556    if( VerifyOptoOopOffsets ) {
2557      assert( n->is_Mem(), "" );
2558      MemNode *mem  = (MemNode*)n;
2559      // Check to see if address types have grounded out somehow.
2560      const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
2561      assert( !tp || oop_offset_is_sane(tp), "" );
2562    }
2563#endif
2564    break;
2565  }
2566
2567  case Op_AddP: {               // Assert sane base pointers
2568    Node *addp = n->in(AddPNode::Address);
2569    assert( !addp->is_AddP() ||
2570            addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
2571            addp->in(AddPNode::Base) == n->in(AddPNode::Base),
2572            "Base pointers must match" );
2573#ifdef _LP64
2574    if ((UseCompressedOops || UseCompressedKlassPointers) &&
2575        addp->Opcode() == Op_ConP &&
2576        addp == n->in(AddPNode::Base) &&
2577        n->in(AddPNode::Offset)->is_Con()) {
2578      // Use addressing with a narrow klass to load with an offset on x86.
2579      // On sparc, loading a 32-bit constant and decoding it takes fewer
2580      // instructions (4) than loading a 64-bit constant (7).
2581      // Do this transformation here since IGVN will convert ConN back to ConP.
2582      const Type* t = addp->bottom_type();
2583      if (t->isa_oopptr() || t->isa_klassptr()) {
2584        Node* nn = NULL;
2585
2586        int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
2587
2588        // Look for existing ConN node of the same exact type.
2589        Node* r  = root();
2590        uint cnt = r->outcnt();
2591        for (uint i = 0; i < cnt; i++) {
2592          Node* m = r->raw_out(i);
2593          if (m!= NULL && m->Opcode() == op &&
2594              m->bottom_type()->make_ptr() == t) {
2595            nn = m;
2596            break;
2597          }
2598        }
2599        if (nn != NULL) {
2600          // Decode a narrow oop to match address
2601          // [R12 + narrow_oop_reg<<3 + offset]
2602          if (t->isa_oopptr()) {
2603            nn = new (this) DecodeNNode(nn, t);
2604          } else {
2605            nn = new (this) DecodeNKlassNode(nn, t);
2606          }
2607          n->set_req(AddPNode::Base, nn);
2608          n->set_req(AddPNode::Address, nn);
2609          if (addp->outcnt() == 0) {
2610            addp->disconnect_inputs(NULL, this);
2611          }
2612        }
2613      }
2614    }
2615#endif
2616    break;
2617  }
2618
2619#ifdef _LP64
2620  case Op_CastPP:
2621    if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
2622      Node* in1 = n->in(1);
2623      const Type* t = n->bottom_type();
2624      Node* new_in1 = in1->clone();
2625      new_in1->as_DecodeN()->set_type(t);
2626
2627      if (!Matcher::narrow_oop_use_complex_address()) {
2628        //
2629        // x86, ARM and friends can handle 2 adds in addressing mode
2630        // and Matcher can fold a DecodeN node into address by using
2631        // a narrow oop directly and do implicit NULL check in address:
2632        //
2633        // [R12 + narrow_oop_reg<<3 + offset]
2634        // NullCheck narrow_oop_reg
2635        //
2636        // On other platforms (Sparc) we have to keep new DecodeN node and
2637        // use it to do implicit NULL check in address:
2638        //
2639        // decode_not_null narrow_oop_reg, base_reg
2640        // [base_reg + offset]
2641        // NullCheck base_reg
2642        //
2643        // Pin the new DecodeN node to the non-null path on these platforms
2644        // (Sparc) to record which NULL check the new DecodeN node
2645        // corresponds to, so it can be used as the value in implicit_null_check().
2646        //
2647        new_in1->set_req(0, n->in(0));
2648      }
2649
2650      n->subsume_by(new_in1, this);
2651      if (in1->outcnt() == 0) {
2652        in1->disconnect_inputs(NULL, this);
2653      }
2654    }
2655    break;
2656
2657  case Op_CmpP:
2658    // Do this transformation here to preserve CmpPNode::sub() and
2659    // other TypePtr related Ideal optimizations (for example, ptr nullness).
2660    if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
2661      Node* in1 = n->in(1);
2662      Node* in2 = n->in(2);
2663      if (!in1->is_DecodeNarrowPtr()) {
2664        in2 = in1;
2665        in1 = n->in(2);
2666      }
2667      assert(in1->is_DecodeNarrowPtr(), "sanity");
2668
2669      Node* new_in2 = NULL;
2670      if (in2->is_DecodeNarrowPtr()) {
2671        assert(in2->Opcode() == in1->Opcode(), "must be same node type");
2672        new_in2 = in2->in(1);
2673      } else if (in2->Opcode() == Op_ConP) {
2674        const Type* t = in2->bottom_type();
2675        if (t == TypePtr::NULL_PTR) {
2676          assert(in1->is_DecodeN(), "compare klass to null?");
2677          // Don't convert CmpP null check into CmpN if compressed
2678          // oops implicit null check is not generated.
2679          // This will allow to generate normal oop implicit null check.
2680          if (Matcher::gen_narrow_oop_implicit_null_checks())
2681            new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
2682          //
2683          // This transformation, together with the CastPP transformation above,
2684          // will generate code for implicit NULL checks for compressed oops.
2685          //
2686          // The original code after Optimize()
2687          //
2688          //    LoadN memory, narrow_oop_reg
2689          //    decode narrow_oop_reg, base_reg
2690          //    CmpP base_reg, NULL
2691          //    CastPP base_reg // NotNull
2692          //    Load [base_reg + offset], val_reg
2693          //
2694          // after these transformations will be
2695          //
2696          //    LoadN memory, narrow_oop_reg
2697          //    CmpN narrow_oop_reg, NULL
2698          //    decode_not_null narrow_oop_reg, base_reg
2699          //    Load [base_reg + offset], val_reg
2700          //
2701          // and the uncommon path (== NULL) will use narrow_oop_reg directly
2702          // since narrow oops can be used in debug info now (see the code in
2703          // final_graph_reshaping_walk()).
2704          //
2705          // At the end the code will be matched to
2706          // on x86:
2707          //
2708          //    Load_narrow_oop memory, narrow_oop_reg
2709          //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
2710          //    NullCheck narrow_oop_reg
2711          //
2712          // and on sparc:
2713          //
2714          //    Load_narrow_oop memory, narrow_oop_reg
2715          //    decode_not_null narrow_oop_reg, base_reg
2716          //    Load [base_reg + offset], val_reg
2717          //    NullCheck base_reg
2718          //
2719        } else if (t->isa_oopptr()) {
2720          new_in2 = ConNode::make(this, t->make_narrowoop());
2721        } else if (t->isa_klassptr()) {
2722          new_in2 = ConNode::make(this, t->make_narrowklass());
2723        }
2724      }
2725      if (new_in2 != NULL) {
2726        Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2);
2727        n->subsume_by(cmpN, this);
2728        if (in1->outcnt() == 0) {
2729          in1->disconnect_inputs(NULL, this);
2730        }
2731        if (in2->outcnt() == 0) {
2732          in2->disconnect_inputs(NULL, this);
2733        }
2734      }
2735    }
2736    break;
2737
2738  case Op_DecodeN:
2739  case Op_DecodeNKlass:
2740    assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
2741    // DecodeN could be pinned when it can't be folded into
2742    // an address expression; see the code for Op_CastPP above.
2743    assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
2744    break;
2745
2746  case Op_EncodeP:
2747  case Op_EncodePKlass: {
2748    Node* in1 = n->in(1);
2749    if (in1->is_DecodeNarrowPtr()) {
2750      n->subsume_by(in1->in(1), this);
2751    } else if (in1->Opcode() == Op_ConP) {
2752      const Type* t = in1->bottom_type();
2753      if (t == TypePtr::NULL_PTR) {
2754        assert(t->isa_oopptr(), "null klass?");
2755        n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
2756      } else if (t->isa_oopptr()) {
2757        n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
2758      } else if (t->isa_klassptr()) {
2759        n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
2760      }
2761    }
2762    if (in1->outcnt() == 0) {
2763      in1->disconnect_inputs(NULL, this);
2764    }
2765    break;
2766  }
2767
2768  case Op_Proj: {
2769    if (OptimizeStringConcat) {
2770      ProjNode* p = n->as_Proj();
2771      if (p->_is_io_use) {
2772        // Separate projections were used for the exception path; they
2773        // are normally removed by a late inline.  If the call wasn't
2774        // inlined, they hang around and should just be replaced with
2775        // the original projection.
2776        Node* proj = NULL;
2777        // Replace with just one
2778        for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
2779          Node *use = i.get();
2780          if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
2781            proj = use;
2782            break;
2783          }
2784        }
2785        assert(proj != NULL, "must be found");
2786        p->subsume_by(proj, this);
2787      }
2788    }
2789    break;
2790  }
2791
2792  case Op_Phi:
2793    if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
2794      // The EncodeP optimization may create a Phi with the same edges
2795      // for all paths.  Such a Phi is not handled well by the register allocator.
2796      Node* unique_in = n->in(1);
2797      assert(unique_in != NULL, "");
2798      uint cnt = n->req();
2799      for (uint i = 2; i < cnt; i++) {
2800        Node* m = n->in(i);
2801        assert(m != NULL, "");
2802        if (unique_in != m)
2803          unique_in = NULL;
2804      }
2805      if (unique_in != NULL) {
2806        n->subsume_by(unique_in, this);
2807      }
2808    }
2809    break;
2810
2811#endif
2812
2813  case Op_ModI:
2814    if (UseDivMod) {
2815      // Check if a%b and a/b both exist
2816      Node* d = n->find_similar(Op_DivI);
2817      if (d) {
2818        // Replace them with a fused divmod if supported
2819        if (Matcher::has_match_rule(Op_DivModI)) {
2820          DivModINode* divmod = DivModINode::make(this, n);
2821          d->subsume_by(divmod->div_proj(), this);
2822          n->subsume_by(divmod->mod_proj(), this);
2823        } else {
2824          // replace a%b with a-((a/b)*b)
2825          Node* mult = new (this) MulINode(d, d->in(2));
2826          Node* sub  = new (this) SubINode(d->in(1), mult);
2827          n->subsume_by(sub, this);
2828        }
2829      }
2830    }
2831    break;
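
  // Editor's note, the identity behind the else-branch above: with Java's
  // truncating division, a % b == a - (a / b) * b.  E.g. a = -7, b = 3 gives
  // a/b = -2, so a - (a/b)*b = -7 - (-6) = -1, matching -7 % 3.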
2832
2833  case Op_ModL:
2834    if (UseDivMod) {
2835      // Check if a%b and a/b both exist
2836      Node* d = n->find_similar(Op_DivL);
2837      if (d) {
2838        // Replace them with a fused divmod if supported
2839        if (Matcher::has_match_rule(Op_DivModL)) {
2840          DivModLNode* divmod = DivModLNode::make(this, n);
2841          d->subsume_by(divmod->div_proj(), this);
2842          n->subsume_by(divmod->mod_proj(), this);
2843        } else {
2844          // replace a%b with a-((a/b)*b)
2845          Node* mult = new (this) MulLNode(d, d->in(2));
2846          Node* sub  = new (this) SubLNode(d->in(1), mult);
2847          n->subsume_by(sub, this);
2848        }
2849      }
2850    }
2851    break;
2852
2853  case Op_LoadVector:
2854  case Op_StoreVector:
2855    break;
2856
2857  case Op_PackB:
2858  case Op_PackS:
2859  case Op_PackI:
2860  case Op_PackF:
2861  case Op_PackL:
2862  case Op_PackD:
2863    if (n->req()-1 > 2) {
2864      // Replace many-operand PackNodes with a binary tree for matching
2865      PackNode* p = (PackNode*) n;
2866      Node* btp = p->binary_tree_pack(this, 1, n->req());
2867      n->subsume_by(btp, this);
2868    }
2869    break;
2870  case Op_Loop:
2871  case Op_CountedLoop:
2872    if (n->as_Loop()->is_inner_loop()) {
2873      frc.inc_inner_loop_count();
2874    }
2875    break;
2876  case Op_LShiftI:
2877  case Op_RShiftI:
2878  case Op_URShiftI:
2879  case Op_LShiftL:
2880  case Op_RShiftL:
2881  case Op_URShiftL:
2882    if (Matcher::need_masked_shift_count) {
2883      // The cpu's shift instructions don't restrict the count to the
2884      // lower 5/6 bits. We need to do the masking ourselves.
2885      Node* in2 = n->in(2);
2886      juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
2887      const TypeInt* t = in2->find_int_type();
2888      if (t != NULL && t->is_con()) {
2889        juint shift = t->get_con();
2890        if (shift > mask) { // Unsigned cmp
2891          n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
2892        }
2893      } else {
2894        if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
2895          Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
2896          n->set_req(2, shift);
2897        }
2898      }
2899      if (in2->outcnt() == 0) { // Remove dead node
2900        in2->disconnect_inputs(NULL, this);
2901      }
2902    }
2903    break;
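
  // Editor's note: the rewrite above is safe because Java defines x << s as
  // x << (s & 31) for ints (s & 63 for longs).  E.g. an int shift by the
  // constant 37 becomes a shift by 37 & 31 == 5; a non-constant count whose
  // type may exceed the mask gets an explicit AndI with the mask instead.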
2904  case Op_MemBarStoreStore:
2905    // Break the link with AllocateNode: it is no longer useful and
2906    // confuses register allocation.
2907    if (n->req() > MemBarNode::Precedent) {
2908      n->set_req(MemBarNode::Precedent, top());
2909    }
2910    break;
2911  default:
2912    assert( !n->is_Call(), "" );
2913    assert( !n->is_Mem(), "" );
2914    break;
2915  }
2916
2917  // Collect CFG split points
2918  if (n->is_MultiBranch())
2919    frc._tests.push(n);
2920}
2921
2922//------------------------------final_graph_reshaping_walk---------------------
2923// Replacing Opaque nodes with their input in final_graph_reshaping_impl()
2924// requires that the walk visits a node's inputs before visiting the node.
2925void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
2926  ResourceArea *area = Thread::current()->resource_area();
2927  Unique_Node_List sfpt(area);
2928
2929  frc._visited.set(root->_idx); // first, mark node as visited
2930  uint cnt = root->req();
2931  Node *n = root;
2932  uint  i = 0;
2933  while (true) {
2934    if (i < cnt) {
2935      // Place all non-visited non-null inputs onto stack
2936      Node* m = n->in(i);
2937      ++i;
2938      if (m != NULL && !frc._visited.test_set(m->_idx)) {
2939        if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
2940          sfpt.push(m);
2941        cnt = m->req();
2942        nstack.push(n, i); // put on stack parent and next input's index
2943        n = m;
2944        i = 0;
2945      }
2946    } else {
2947      // Now do post-visit work
2948      final_graph_reshaping_impl( n, frc );
2949      if (nstack.is_empty())
2950        break;             // finished
2951      n = nstack.node();   // Get node from stack
2952      cnt = n->req();
2953      i = nstack.index();
2954      nstack.pop();        // Shift to the next node on stack
2955    }
2956  }
2957
2958  // Skip next transformation if compressed oops are not used.
2959  if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
2960      (!UseCompressedOops && !UseCompressedKlassPointers))
2961    return;
2962
2963  // Go over safepoint nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
2964  // This can be done for uncommon traps, or for any safepoints/calls,
2965  // if the DecodeN/DecodeNKlass node is referenced only in debug info.
2966  while (sfpt.size() > 0) {
2967    n = sfpt.pop();
2968    JVMState *jvms = n->as_SafePoint()->jvms();
2969    assert(jvms != NULL, "sanity");
2970    int start = jvms->debug_start();
2971    int end   = n->req();
2972    bool is_uncommon = (n->is_CallStaticJava() &&
2973                        n->as_CallStaticJava()->uncommon_trap_request() != 0);
2974    for (int j = start; j < end; j++) {
2975      Node* in = n->in(j);
2976      if (in->is_DecodeNarrowPtr()) {
2977        bool safe_to_skip = true;
2978        if (!is_uncommon ) {
2979          // Is it safe to skip?
2980          for (uint i = 0; i < in->outcnt(); i++) {
2981            Node* u = in->raw_out(i);
2982            if (!u->is_SafePoint() ||
2983                (u->is_Call() && u->as_Call()->has_non_debug_use(n))) {
2984              safe_to_skip = false;
2985            }
2986          }
2987        }
2988        if (safe_to_skip) {
2989          n->set_req(j, in->in(1));
2990        }
2991        if (in->outcnt() == 0) {
2992          in->disconnect_inputs(NULL, this);
2993        }
2994      }
2995    }
2996  }
2997}
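
// A note on the walk above: it is an iterative depth-first postorder
// traversal using an explicit Node_Stack, equivalent to the recursive
// sketch below (illustrative only) but immune to native stack overflow
// on very deep ideal graphs:
//    void walk(Node* n) {
//      if (frc._visited.test_set(n->_idx))  return;
//      for (uint i = 0; i < n->req(); i++)
//        if (n->in(i) != NULL)  walk(n->in(i));
//      final_graph_reshaping_impl(n, frc);  // post-visit after all inputs
//    }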
2998
2999//------------------------------final_graph_reshaping--------------------------
3000// Final Graph Reshaping.
3001//
3002// (1) Clone simple inputs to uncommon calls, so they can be scheduled late
3003//     and not commoned up and forced early.  Must come after regular
3004//     optimizations to avoid GVN undoing the cloning.  Clone constant
3005//     inputs to Loop Phis; these will be split by the allocator anyways.
3006//     Remove Opaque nodes.
3007// (2) Move last-uses by commutative operations to the left input to encourage
3008//     Intel update-in-place two-address operations and better register usage
3009//     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
3010//     calls canonicalizing them back.
3011// (3) Count the number of double-precision FP ops, single-precision FP ops
3012//     and call sites.  On Intel, we can get correct rounding either by
3013//     forcing singles to memory (requires extra stores and loads after each
3014//     FP bytecode) or we can set a rounding mode bit (requires setting and
3015//     clearing the mode bit around call sites).  The mode bit is only used
3016//     if the relative frequency of single FP ops to calls is low enough.
3017//     This is a key transform for SPEC mpeg_audio.
3018// (4) Detect infinite loops; blobs of code reachable from above but not
3019//     below.  Several of the Code_Gen algorithms fail on such code shapes,
3020//     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
3021//     from time to time in other code (such as -Xcomp finalizer loops, etc.).
3022//     Detection is by looking for IfNodes where only 1 projection is
3023//     reachable from below or CatchNodes missing some targets.
3024// (5) Assert for insane oop offsets in debug mode.
3025
3026bool Compile::final_graph_reshaping() {
3027  // an infinite loop may have been eliminated by the optimizer,
3028  // in which case the graph will be empty.
3029  if (root()->req() == 1) {
3030    record_method_not_compilable("trivial infinite loop");
3031    return true;
3032  }
3033
3034  // Expensive nodes have their control input set to prevent the GVN
3035  // from freely commoning them. There's no GVN beyond this point so
3036  // no need to keep the control input. We want the expensive nodes to
3037  // be freely moved to the least frequent code path by gcm.
3038  assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non-empty?");
3039  for (int i = 0; i < expensive_count(); i++) {
3040    _expensive_nodes->at(i)->set_req(0, NULL);
3041  }
3042
3043  Final_Reshape_Counts frc;
3044
3045  // Visit everybody reachable!
3046  // Allocate stack of size C->unique()/2 to avoid frequent realloc
3047  Node_Stack nstack(unique() >> 1);
3048  final_graph_reshaping_walk(nstack, root(), frc);
3049
3050  // Check for unreachable (from below) code (i.e., infinite loops).
3051  for( uint i = 0; i < frc._tests.size(); i++ ) {
3052    MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
3053    // Get number of CFG targets.
3054    // Note that PCTables include exception targets after calls.
3055    uint required_outcnt = n->required_outcnt();
3056    if (n->outcnt() != required_outcnt) {
3057      // Check for a few special cases.  Rethrow Nodes never take the
3058      // 'fall-thru' path, so expected kids is 1 less.
3059      if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
3060        if (n->in(0)->in(0)->is_Call()) {
3061          CallNode *call = n->in(0)->in(0)->as_Call();
3062          if (call->entry_point() == OptoRuntime::rethrow_stub()) {
3063            required_outcnt--;      // Rethrow always has 1 less kid
3064          } else if (call->req() > TypeFunc::Parms &&
3065                     call->is_CallDynamicJava()) {
3066            // Check for null receiver. In such case, the optimizer has
3067            // detected that the virtual call will always result in a null
3068            // pointer exception. The fall-through projection of this CatchNode
3069            // will not be populated.
3070            Node *arg0 = call->in(TypeFunc::Parms);
3071            if (arg0->is_Type() &&
3072                arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
3073              required_outcnt--;
3074            }
3075          } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
3076                     call->req() > TypeFunc::Parms+1 &&
3077                     call->is_CallStaticJava()) {
3078            // Check for negative array length. In such case, the optimizer has
3079            // detected that the allocation attempt will always result in an
3080//            exception. There is no fall-through projection of this CatchNode.
3081            Node *arg1 = call->in(TypeFunc::Parms+1);
3082            if (arg1->is_Type() &&
3083                arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
3084              required_outcnt--;
3085            }
3086          }
3087        }
3088      }
3089      // Recheck with a better notion of 'required_outcnt'
3090      if (n->outcnt() != required_outcnt) {
3091        record_method_not_compilable("malformed control flow");
3092        return true;            // Not all targets reachable!
3093      }
3094    }
3095    // Check that I actually visited all kids.  Unreached kids
3096    // must be infinite loops.
3097    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
3098      if (!frc._visited.test(n->fast_out(j)->_idx)) {
3099        record_method_not_compilable("infinite loop");
3100        return true;            // Found unvisited kid; must be unreach
3101      }
3102  }
3103
3104  // If the original bytecodes contained a mixture of floats and doubles,
3105  // check whether the optimizer has made it homogeneous; see item (3) above.
3106  if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
3107      frc.get_float_count() > 32 &&
3108      frc.get_double_count() == 0 &&
3109      (10 * frc.get_call_count() < frc.get_float_count()) ) {
3110    set_24_bit_selection_and_mode( false,  true );
3111  }
3112
3113  set_java_calls(frc.get_java_call_count());
3114  set_inner_loops(frc.get_inner_loop_count());
3115
3116  // No infinite loops, no reason to bail out.
3117  return false;
3118}
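
// A worked instance of the item (3) heuristic above (numbers illustrative):
// a method with 400 single-precision FP ops, no doubles, and 10 call sites
// satisfies float_count > 32, double_count == 0, and 10 * 10 < 400, so the
// rounding-mode bit is selected instead of per-bytecode stores and loads.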
3119
3120//-----------------------------too_many_traps----------------------------------
3121// Report if there are too many traps at the current method and bci.
3122// Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
3123bool Compile::too_many_traps(ciMethod* method,
3124                             int bci,
3125                             Deoptimization::DeoptReason reason) {
3126  ciMethodData* md = method->method_data();
3127  if (md->is_empty()) {
3128    // Assume the trap has not occurred, or that it occurred only
3129    // because of a transient condition during start-up in the interpreter.
3130    return false;
3131  }
3132  if (md->has_trap_at(bci, reason) != 0) {
3133    // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
3134    // Also, if there are multiple reasons, or if there is no per-BCI record,
3135    // assume the worst.
3136    if (log())
3137      log()->elem("observe trap='%s' count='%d'",
3138                  Deoptimization::trap_reason_name(reason),
3139                  md->trap_count(reason));
3140    return true;
3141  } else {
3142    // Ignore method/bci and see if there have been too many globally.
3143    return too_many_traps(reason, md);
3144  }
3145}
3146
3147// Less-accurate variant which does not require a method and bci.
3148bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
3149                             ciMethodData* logmd) {
3150  if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
3151    // Too many traps globally.
3152    // Note that we use cumulative trap_count, not just md->trap_count.
3153    if (log()) {
3154      int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
3155      log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
3156                  Deoptimization::trap_reason_name(reason),
3157                  mcount, trap_count(reason));
3158    }
3159    return true;
3160  } else {
3161    // The coast is clear.
3162    return false;
3163  }
3164}
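
// A hedged sketch of how callers typically consult these predicates (the
// real call sites are in the parser and GraphKit and may differ in detail):
//    if (C->too_many_traps(method(), bci(), reason)) {
//      // trust the profile: generate the slow path inline rather than
//      // another uncommon trap that would deoptimize yet again
//    } else {
//      // emit an uncommon trap for the unlikely path
//    }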
3165
3166//--------------------------too_many_recompiles--------------------------------
3167// Report if there are too many recompiles at the current method and bci.
3168// Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
3169// This method is not eager to return true, since returning true causes the
3170// compiler to use Action_none for a trap point, to avoid too many recompilations.
3171bool Compile::too_many_recompiles(ciMethod* method,
3172                                  int bci,
3173                                  Deoptimization::DeoptReason reason) {
3174  ciMethodData* md = method->method_data();
3175  if (md->is_empty()) {
3176    // Assume the trap has not occurred, or that it occurred only
3177    // because of a transient condition during start-up in the interpreter.
3178    return false;
3179  }
3180  // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
3181  uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
3182  uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
3183  Deoptimization::DeoptReason per_bc_reason
3184    = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
3185  if ((per_bc_reason == Deoptimization::Reason_none
3186       || md->has_trap_at(bci, reason) != 0)
3187      // The trap frequency measure we care about is the recompile count:
3188      && md->trap_recompiled_at(bci)
3189      && md->overflow_recompile_count() >= bc_cutoff) {
3190    // Do not emit a trap here if it has already caused recompilations.
3191    // Also, if there are multiple reasons, or if there is no per-BCI record,
3192    // assume the worst.
3193    if (log())
3194      log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
3195                  Deoptimization::trap_reason_name(reason),
3196                  md->trap_count(reason),
3197                  md->overflow_recompile_count());
3198    return true;
3199  } else if (trap_count(reason) != 0
3200             && decompile_count() >= m_cutoff) {
3201    // Too many recompiles globally, and we have seen this sort of trap.
3202    // Use cumulative decompile_count, not just md->decompile_count.
3203    if (log())
3204      log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
3205                  Deoptimization::trap_reason_name(reason),
3206                  md->trap_count(reason), trap_count(reason),
3207                  md->decompile_count(), decompile_count());
3208    return true;
3209  } else {
3210    // The coast is clear.
3211    return false;
3212  }
3213}
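
// Assuming the usual default flag values (see globals.hpp for the actual
// settings), PerBytecodeRecompilationCutoff = 200 and
// PerMethodRecompilationCutoff = 400 give bc_cutoff = 200 / 8 = 25 and
// m_cutoff = 400 / 2 + 1 = 201: a bytecode stops re-trapping well before
// the hard per-bytecode limit, while the per-method path requires a
// substantial cumulative decompile count.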
3214
3215
3216#ifndef PRODUCT
3217//------------------------------verify_graph_edges---------------------------
3218// Walk the Graph and verify that there is a one-to-one correspondence
3219// between Use-Def edges and Def-Use edges in the graph.
3220void Compile::verify_graph_edges(bool no_dead_code) {
3221  if (VerifyGraphEdges) {
3222    ResourceArea *area = Thread::current()->resource_area();
3223    Unique_Node_List visited(area);
3224    // Call recursive graph walk to check edges
3225    _root->verify_edges(visited);
3226    if (no_dead_code) {
3227      // Now make sure that no visited node is used by an unvisited node.
3228      int dead_nodes = 0;
3229      Unique_Node_List checked(area);
3230      while (visited.size() > 0) {
3231        Node* n = visited.pop();
3232        checked.push(n);
3233        for (uint i = 0; i < n->outcnt(); i++) {
3234          Node* use = n->raw_out(i);
3235          if (checked.member(use))  continue;  // already checked
3236          if (visited.member(use))  continue;  // already in the graph
3237          if (use->is_Con())        continue;  // a dead ConNode is OK
3238          // At this point, we have found a dead node which is DU-reachable.
3239          if (dead_nodes++ == 0)
3240            tty->print_cr("*** Dead nodes reachable via DU edges:");
3241          use->dump(2);
3242          tty->print_cr("---");
3243          checked.push(use);  // No repeats; pretend it is now checked.
3244        }
3245      }
3246      assert(dead_nodes == 0, "using nodes must be reachable from root");
3247    }
3248  }
3249}
3250#endif
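
// The invariant verified above, as a sketch (illustrative pseudocode):
//    for each node n and each non-NULL input d = n->in(i):
//      d's out-array must contain n       (matching Def-Use edge)
//    for each use u in n's out-array:
//      u must list n among its inputs     (matching Use-Def edge)
// A mismatch in either direction means an edge was updated on one side
// only, which later phases would trip over.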
3251
3252// The Compile object keeps track of failure reasons separately from the ciEnv.
3253// This is required because there is not quite a one-to-one relation among
3254// the ciEnv, its compilation task, and the Compile object.  Note that one
3255// ciEnv might use two Compile objects, if C2Compiler::compile_method decides
3256// to backtrack and retry without subsuming loads.  Other than this backtracking
3257// behavior, the Compile's failure reason is quietly copied up to the ciEnv
3258// by the logic in C2Compiler.
3259void Compile::record_failure(const char* reason) {
3260  if (log() != NULL) {
3261    log()->elem("failure reason='%s' phase='compile'", reason);
3262  }
3263  if (_failure_reason == NULL) {
3264    // Record the first failure reason.
3265    _failure_reason = reason;
3266  }
3267  if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
3268    C->print_method(_failure_reason);
3269  }
3270  _root = NULL;  // flush the graph, too
3271}
3272
3273Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
3274  : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
3275    _phase_name(name), _dolog(dolog)
3276{
3277  if (dolog) {
3278    C = Compile::current();
3279    _log = C->log();
3280  } else {
3281    C = NULL;
3282    _log = NULL;
3283  }
3284  if (_log != NULL) {
3285    _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3286    _log->stamp();
3287    _log->end_head();
3288  }
3289}
3290
3291Compile::TracePhase::~TracePhase() {
3292
3293  C = Compile::current();
3294  if (_dolog) {
3295    _log = C->log();
3296  } else {
3297    _log = NULL;
3298  }
3299
3300#ifdef ASSERT
3301  if (PrintIdealNodeCount) {
3302    tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
3303                  _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
3304  }
3305
3306  if (VerifyIdealNodeCount) {
3307    Compile::current()->print_missing_nodes();
3308  }
3309#endif
3310
3311  if (_log != NULL) {
3312    _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
3313  }
3314}
3315
3316//=============================================================================
3317// Two Constants are equal when their type and value are equal.
3318bool Compile::Constant::operator==(const Constant& other) {
3319  if (type()          != other.type()         )  return false;
3320  if (can_be_reused() != other.can_be_reused())  return false;
3321  // For floating point values we compare the bit pattern.
3322  switch (type()) {
3323  case T_FLOAT:   return (_v._value.i == other._v._value.i);
3324  case T_LONG:
3325  case T_DOUBLE:  return (_v._value.j == other._v._value.j);
3326  case T_OBJECT:
3327  case T_ADDRESS: return (_v._value.l == other._v._value.l);
3328  case T_VOID:    return (_v._value.l == other._v._value.l);  // jump-table entries
3329  case T_METADATA: return (_v._metadata == other._v._metadata);
3330  default: ShouldNotReachHere();
3331  }
3332  return false;
3333}
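
// Why bit-pattern comparison matters for the FP cases above (illustrative
// only): with floating-point ==, a NaN constant would never equal itself
// and could never be shared, while +0.0f and -0.0f would wrongly merge
// although their encodings differ:
//    jint a = jint_cast(0.0f);    // 0x00000000
//    jint b = jint_cast(-0.0f);   // 0x80000000, so a != b
// Comparing the raw bits gives each distinct encoding its own table entry.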
3334
3335static int type_to_size_in_bytes(BasicType t) {
3336  switch (t) {
3337  case T_LONG:    return sizeof(jlong  );
3338  case T_FLOAT:   return sizeof(jfloat );
3339  case T_DOUBLE:  return sizeof(jdouble);
3340  case T_METADATA: return sizeof(Metadata*);
3341    // We use T_VOID as marker for jump-table entries (labels) which
3342    // need an internal word relocation.
3343  case T_VOID:
3344  case T_ADDRESS:
3345  case T_OBJECT:  return sizeof(jobject);
3346  }
3347
3348  ShouldNotReachHere();
3349  return -1;
3350}
3351
3352int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) {
3353  // sort descending
3354  if (a->freq() > b->freq())  return -1;
3355  if (a->freq() < b->freq())  return  1;
3356  return 0;
3357}
3358
3359void Compile::ConstantTable::calculate_offsets_and_size() {
3360  // First, sort the array by frequencies.
3361  _constants.sort(qsort_comparator);
3362
3363#ifdef ASSERT
3364  // Make sure all jump-table entries were sorted to the end of the
3365  // array (they have a negative frequency).
3366  bool found_void = false;
3367  for (int i = 0; i < _constants.length(); i++) {
3368    Constant con = _constants.at(i);
3369    if (con.type() == T_VOID)
3370      found_void = true;  // jump-tables
3371    else
3372      assert(!found_void, "wrong sorting");
3373  }
3374#endif
3375
3376  int offset = 0;
3377  for (int i = 0; i < _constants.length(); i++) {
3378    Constant* con = _constants.adr_at(i);
3379
3380    // Align offset for type.
3381    int typesize = type_to_size_in_bytes(con->type());
3382    offset = align_size_up(offset, typesize);
3383    con->set_offset(offset);   // set constant's offset
3384
3385    if (con->type() == T_VOID) {
3386      MachConstantNode* n = (MachConstantNode*) con->get_jobject();
3387      offset = offset + typesize * n->outcnt();  // expand jump-table
3388    } else {
3389      offset = offset + typesize;
3390    }
3391  }
3392
3393  // Align size up to the next section start (which is insts; see
3394  // CodeBuffer::align_at_start).
3395  assert(_size == -1, "already set?");
3396  _size = align_size_up(offset, CodeEntryAlignment);
3397}
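
// A worked example of the layout computed above (constants illustrative):
// for [double d, float f, long l], already sorted by descending frequency,
//    d: offset = align_size_up(0, 8)  = 0,  next offset  8
//    f: offset = align_size_up(8, 4)  = 8,  next offset 12
//    l: offset = align_size_up(12, 8) = 16, next offset 24
// and finally _size = align_size_up(24, CodeEntryAlignment).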
3398
3399void Compile::ConstantTable::emit(CodeBuffer& cb) {
3400  MacroAssembler _masm(&cb);
3401  for (int i = 0; i < _constants.length(); i++) {
3402    Constant con = _constants.at(i);
3403    address constant_addr;
3404    switch (con.type()) {
3405    case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
3406    case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
3407    case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
3408    case T_OBJECT: {
3409      jobject obj = con.get_jobject();
3410      int oop_index = _masm.oop_recorder()->find_index(obj);
3411      constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
3412      break;
3413    }
3414    case T_ADDRESS: {
3415      address addr = (address) con.get_jobject();
3416      constant_addr = _masm.address_constant(addr);
3417      break;
3418    }
3419    // We use T_VOID as marker for jump-table entries (labels) which
3420    // need an internal word relocation.
3421    case T_VOID: {
3422      MachConstantNode* n = (MachConstantNode*) con.get_jobject();
3423      // Fill the jump-table with a dummy word.  The real value is
3424      // filled in later in fill_jump_table.
3425      address dummy = (address) n;
3426      constant_addr = _masm.address_constant(dummy);
3427      // Expand jump-table
3428      for (uint i = 1; i < n->outcnt(); i++) {
3429        address temp_addr = _masm.address_constant(dummy + i);
3430        assert(temp_addr, "consts section too small");
3431      }
3432      break;
3433    }
3434    case T_METADATA: {
3435      Metadata* obj = con.get_metadata();
3436      int metadata_index = _masm.oop_recorder()->find_index(obj);
3437      constant_addr = _masm.address_constant((address) obj, metadata_Relocation::spec(metadata_index));
3438      break;
3439    }
3440    default: ShouldNotReachHere();
3441    }
3442    assert(constant_addr, "consts section too small");
3443    assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg_res("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
3444  }
3445}
3446
3447int Compile::ConstantTable::find_offset(Constant& con) const {
3448  int idx = _constants.find(con);
3449  assert(idx != -1, "constant must be in constant table");
3450  int offset = _constants.at(idx).offset();
3451  assert(offset != -1, "constant table not emitted yet?");
3452  return offset;
3453}
3454
3455void Compile::ConstantTable::add(Constant& con) {
3456  if (con.can_be_reused()) {
3457    int idx = _constants.find(con);
3458    if (idx != -1 && _constants.at(idx).can_be_reused()) {
3459      _constants.adr_at(idx)->inc_freq(con.freq());  // merge with the existing entry, accumulating its frequency
3460      return;
3461    }
3462  }
3463  (void) _constants.append(con);
3464}
3465
3466Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
3467  Block* b = Compile::current()->cfg()->_bbs[n->_idx];
3468  Constant con(type, value, b->_freq);
3469  add(con);
3470  return con;
3471}
3472
3473Compile::Constant Compile::ConstantTable::add(Metadata* metadata) {
3474  Constant con(metadata);
3475  add(con);
3476  return con;
3477}
3478
3479Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, MachOper* oper) {
3480  jvalue value;
3481  BasicType type = oper->type()->basic_type();
3482  switch (type) {
3483  case T_LONG:    value.j = oper->constantL(); break;
3484  case T_FLOAT:   value.f = oper->constantF(); break;
3485  case T_DOUBLE:  value.d = oper->constantD(); break;
3486  case T_OBJECT:
3487  case T_ADDRESS: value.l = (jobject) oper->constant(); break;
3488  case T_METADATA: return add((Metadata*)oper->constant()); break;
3489  default: guarantee(false, err_msg_res("unhandled type: %s", type2name(type)));
3490  }
3491  return add(n, type, value);
3492}
3493
3494Compile::Constant Compile::ConstantTable::add_jump_table(MachConstantNode* n) {
3495  jvalue value;
3496  // We can use the node pointer here to identify the right jump-table,
3497  // as this method is called from Compile::Fill_buffer right before
3498  // the MachNodes are emitted and the jump-table is filled (meaning the
3499  // MachNode pointers no longer change).
3500  value.l = (jobject) n;
3501  Constant con(T_VOID, value, next_jump_table_freq(), false);  // Labels of a jump-table cannot be reused.
3502  add(con);
3503  return con;
3504}
3505
3506void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
3507  // If called from Compile::scratch_emit_size do nothing.
3508  if (Compile::current()->in_scratch_emit_size())  return;
3509
3510  assert(labels.is_nonempty(), "must be");
3511  assert((uint) labels.length() == n->outcnt(), err_msg_res("must be equal: %d == %d", labels.length(), n->outcnt()));
3512
3513  // Since MachConstantNode::constant_offset() also includes the
3514  // table_base_offset(), we need to subtract it to get the plain
3515  // offset into the constant table.
3516  int offset = n->constant_offset() - table_base_offset();
3517
3518  MacroAssembler _masm(&cb);
3519  address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);
3520
3521  for (uint i = 0; i < n->outcnt(); i++) {
3522    address* constant_addr = &jump_table_base[i];
3523    assert(*constant_addr == (((address) n) + i), err_msg_res("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, *constant_addr, (((address) n) + i)));
3524    *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
3525    cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
3526  }
3527}
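
// Life cycle of a jump-table slot, as a sketch (illustrative only):
//    emit():            slot i is filled with the dummy value (address)n + i
//    fill_jump_table(): slot i is overwritten with the code address of
//                       labels.at(i), and an internal_word relocation is
//                       recorded so the slot is fixed up if the nmethod moves
// The dummy n + i values are exactly what the assert above checks before
// patching.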
3528
3529void Compile::dump_inlining() {
3530  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
3531    // Print an inlining message for candidates that we couldn't inline
3532    // for lack of space or a non-constant receiver.
3533    for (int i = 0; i < _late_inlines.length(); i++) {
3534      CallGenerator* cg = _late_inlines.at(i);
3535      cg->print_inlining_late("live nodes > LiveNodeCountInliningCutoff");
3536    }
3537    Unique_Node_List useful;
3538    useful.push(root());
3539    for (uint next = 0; next < useful.size(); ++next) {
3540      Node* n  = useful.at(next);
3541      if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) {
3542        CallNode* call = n->as_Call();
3543        CallGenerator* cg = call->generator();
3544        cg->print_inlining_late("receiver not constant");
3545      }
3546      uint max = n->len();
3547      for ( uint i = 0; i < max; ++i ) {
3548        Node *m = n->in(i);
3549        if ( m == NULL ) continue;
3550        useful.push(m);
3551      }
3552    }
3553    for (int i = 0; i < _print_inlining_list->length(); i++) {
3554      tty->print(_print_inlining_list->at(i).ss()->as_string());
3555    }
3556  }
3557}
3558
3559int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
3560  if (n1->Opcode() < n2->Opcode())      return -1;
3561  else if (n1->Opcode() > n2->Opcode()) return 1;
3562
3563  assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
3564  for (uint i = 1; i < n1->req(); i++) {
3565    if (n1->in(i) < n2->in(i))      return -1;
3566    else if (n1->in(i) > n2->in(i)) return 1;
3567  }
3568
3569  return 0;
3570}
3571
3572int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
3573  Node* n1 = *n1p;
3574  Node* n2 = *n2p;
3575
3576  return cmp_expensive_nodes(n1, n2);
3577}
3578
3579void Compile::sort_expensive_nodes() {
3580  if (!expensive_nodes_sorted()) {
3581    _expensive_nodes->sort(cmp_expensive_nodes);
3582  }
3583}
3584
3585bool Compile::expensive_nodes_sorted() const {
3586  for (int i = 1; i < _expensive_nodes->length(); i++) {
3587    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) {
3588      return false;
3589    }
3590  }
3591  return true;
3592}
3593
3594bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
3595  if (_expensive_nodes->length() == 0) {
3596    return false;
3597  }
3598
3599  assert(OptimizeExpensiveOps, "optimization off?");
3600
3601  // Take this opportunity to remove dead nodes from the list
3602  int j = 0;
3603  for (int i = 0; i < _expensive_nodes->length(); i++) {
3604    Node* n = _expensive_nodes->at(i);
3605    if (!n->is_unreachable(igvn)) {
3606      assert(n->is_expensive(), "should be expensive");
3607      _expensive_nodes->at_put(j, n);
3608      j++;
3609    }
3610  }
3611  _expensive_nodes->trunc_to(j);
3612
3613  // Then sort the list so that similar nodes are next to each other
3614  // and check for at least two nodes of identical kind with the same
3615  // data inputs.
3616  sort_expensive_nodes();
3617
3618  for (int i = 0; i < _expensive_nodes->length()-1; i++) {
3619    if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) {
3620      return true;
3621    }
3622  }
3623
3624  return false;
3625}
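
// The detection above leans on the sort: identical expensive nodes become
// neighbors, so a single linear scan suffices. Sketch (illustrative only):
//    sort(nodes, cmp);                      // by opcode, then by inputs
//    for (int i = 0; i < len - 1; i++)
//      if (cmp(nodes[i], nodes[i+1]) == 0)  // same kind, same data inputs
//        return true;                       // a candidate pair exists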
3626
3627void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
3628  if (_expensive_nodes->length() == 0) {
3629    return;
3630  }
3631
3632  assert(OptimizeExpensiveOps, "optimization off?");
3633
3634  // Sort to bring similar nodes next to each other and clear the
3635  // control input of nodes for which there's only a single copy.
3636  sort_expensive_nodes();
3637
3638  int j = 0;
3639  int identical = 0;
3640  int i = 0;
3641  for (; i < _expensive_nodes->length()-1; i++) {
3642    assert(j <= i, "can't write beyond current index");
3643    if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
3644      identical++;
3645      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
3646      continue;
3647    }
3648    if (identical > 0) {
3649      _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
3650      identical = 0;
3651    } else {
3652      Node* n = _expensive_nodes->at(i);
3653      igvn.hash_delete(n);
3654      n->set_req(0, NULL);
3655      igvn.hash_insert(n);
3656    }
3657  }
3658  if (identical > 0) {
3659    _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
3660  } else if (_expensive_nodes->length() >= 1) {
3661    Node* n = _expensive_nodes->at(i);
3662    igvn.hash_delete(n);
3663    n->set_req(0, NULL);
3664    igvn.hash_insert(n);
3665  }
3666  _expensive_nodes->trunc_to(j);
3667}
3668
3669void Compile::add_expensive_node(Node * n) {
3670  assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list");
3671  assert(n->is_expensive(), "expensive nodes with non-null control here only");
3672  assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
3673  if (OptimizeExpensiveOps) {
3674    _expensive_nodes->append(n);
3675  } else {
3676    // Clear control input and let IGVN optimize expensive nodes if
3677    // OptimizeExpensiveOps is off.
3678    n->set_req(0, NULL);
3679  }
3680}
3681
3682// Auxiliary method to support randomized stressing/fuzzing.
3683//
3684// This method can be called an arbitrary number of times, with the current
3685// count as the argument. The logic allows selecting a single candidate from
3686// the running list of candidates as follows:
3687//    int count = 0;
3688//    Cand* selected = NULL;
3689//    while ((cand = cand->next()) != NULL) {
3690//      if (randomized_select(++count)) {
3691//        selected = cand;
3692//      }
3693//    }
3694//
3695// Including count equalizes the chances that any candidate is "selected".
3696// This is useful when we don't have the complete list of candidates to choose
3697// from uniformly. In that case, we need to adjust the randomness of the
3698// selection, or else we will end up biasing the selection towards the later
3699// candidates.
3700//
3701// A quick back-of-the-envelope calculation shows that for a list of n
3702// candidates, equal probability for a candidate to persist as "best" can be
3703// achieved by replacing it with the k-th "next" candidate with probability
3704// 1/k. It is easy to show that by the end of the run the probability of
3705// any candidate converges to 1/n, giving a uniform distribution among
3706// all the candidates.
3707//
3708// We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
3709#define RANDOMIZED_DOMAIN_POW 29
3710#define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
3711#define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
3712bool Compile::randomized_select(int count) {
3713  assert(count > 0, "only positive");
3714  return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
3715}
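
// Worked example for n = 3 candidates (illustrative): candidate 1 is taken
// with probability 1/1, then survives candidate 2 with probability 1/2 and
// candidate 3 with probability 2/3, winning with 1 * 1/2 * 2/3 = 1/3.
// Candidate 2 wins with 1/2 * 2/3 = 1/3, and candidate 3 with 1/3 -- the
// uniform distribution promised in the comment above randomized_select.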
3716