// c1_IR.cpp revision 5776:de6a9e811145
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_Optimizer.hpp"
#include "utilities/bitMap.inline.hpp"


// Implementation of XHandlers
//
// Note: This code could eventually go away if we are
//       just using the ciExceptionHandlerStream.

XHandlers::XHandlers(ciMethod* method) : _list(method->exception_table_length()) {
  ciExceptionHandlerStream s(method);
  while (!s.is_done()) {
    _list.append(new XHandler(s.handler()));
    s.next();
  }
  assert(s.count() == method->exception_table_length(), "exception table lengths inconsistent");
}

// deep copy of all XHandlers contained in the list
XHandlers::XHandlers(XHandlers* other) :
  _list(other->length())
{
  for (int i = 0; i < other->length(); i++) {
    _list.append(new XHandler(other->handler_at(i)));
  }
}

// Returns whether a particular exception type can be caught.  Also
// returns true if klass is unloaded or any exception handler
// classes are unloaded.  type_is_exact indicates whether the throw
// is known to be exactly that class or it might throw a subtype.
bool XHandlers::could_catch(ciInstanceKlass* klass, bool type_is_exact) const {
  // the type is unknown so be conservative
  if (!klass->is_loaded()) {
    return true;
  }

  for (int i = 0; i < length(); i++) {
    XHandler* handler = handler_at(i);
    if (handler->is_catch_all()) {
      // catch of ANY
      return true;
    }
    ciInstanceKlass* handler_klass = handler->catch_klass();
    // if it's unknown it might be catchable
    if (!handler_klass->is_loaded()) {
      return true;
    }
    // if the throw type is definitely a subtype of the catch type
    // then it can be caught.
    if (klass->is_subtype_of(handler_klass)) {
      return true;
    }
    if (!type_is_exact) {
      // If the type isn't exactly known then it can also be caught by
      // catch statements where the inexact type is a subtype of the
      // catch type.
      // given: foo extends bar extends Exception
      // throw bar can be caught by catch foo, catch bar, and catch
      // Exception, however it can't be caught by any handlers without
      // bar in its type hierarchy.
      if (handler_klass->is_subtype_of(klass)) {
        return true;
      }
    }
  }

  return false;
}


bool XHandlers::equals(XHandlers* others) const {
  if (others == NULL) return false;
  if (length() != others->length()) return false;

  for (int i = 0; i < length(); i++) {
    if (!handler_at(i)->equals(others->handler_at(i))) return false;
  }
  return true;
}

bool XHandler::equals(XHandler* other) const {
  assert(entry_pco() != -1 && other->entry_pco() != -1, "must have entry_pco");

  if (entry_pco() != other->entry_pco()) return false;
  if (scope_count() != other->scope_count()) return false;
  if (_desc != other->_desc) return false;

  assert(entry_block() == other->entry_block(), "entry_block must be equal when entry_pco is equal");
  return true;
}


// Implementation of IRScope
BlockBegin* IRScope::build_graph(Compilation* compilation, int osr_bci) {
  GraphBuilder gm(compilation, this);
  NOT_PRODUCT(if (PrintValueNumbering && Verbose) gm.print_stats());
  if (compilation->bailed_out()) return NULL;
  return gm.start();
}


IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMethod* method, int osr_bci, bool create_graph)
: _callees(2)
, _compilation(compilation)
, _requires_phi_function(method->max_locals())
{
  _caller             = caller;
  _level              = caller == NULL ?  0 : caller->level() + 1;
  _method             = method;
  _xhandlers          = new XHandlers(method);
  _number_of_locks    = 0;
  _monitor_pairing_ok = method->has_balanced_monitors();
  _wrote_final        = false;
  _start              = NULL;

  if (osr_bci == -1) {
    _requires_phi_function.clear();
  } else {
    // selective creation of phi functions is not possible in osr-methods
    _requires_phi_function.set_range(0, method->max_locals());
  }

  assert(method->holder()->is_loaded(), "method holder must be loaded");

  // build graph if monitor pairing is ok
  if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
}


int IRScope::max_stack() const {
  int my_max = method()->max_stack();
  int callee_max = 0;
  for (int i = 0; i < number_of_callees(); i++) {
    callee_max = MAX2(callee_max, callee_no(i)->max_stack());
  }
  return my_max + callee_max;
}


bool IRScopeDebugInfo::should_reexecute() {
  ciMethod* cur_method = scope()->method();
  int       cur_bci    = bci();
  if (cur_method != NULL && cur_bci != SynchronizationEntryBCI) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code);
  } else
    return false;
}


// Implementation of CodeEmitInfo

// Stack must be NON-null
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
  : _scope(stack->scope())
  , _scope_debug_info(NULL)
  , _oop_map(NULL)
  , _stack(stack)
  , _exception_handlers(exception_handlers)
  , _is_method_handle_invoke(false)
  , _deoptimize_on_exception(deoptimize_on_exception) {
  assert(_stack != NULL, "must be non null");
}


CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
  : _scope(info->_scope)
  , _exception_handlers(NULL)
  , _scope_debug_info(NULL)
  , _oop_map(NULL)
  , _stack(stack == NULL ? info->_stack : stack)
  , _is_method_handle_invoke(info->_is_method_handle_invoke)
  , _deoptimize_on_exception(info->_deoptimize_on_exception) {

  // deep copy of exception handlers
  if (info->_exception_handlers != NULL) {
    _exception_handlers = new XHandlers(info->_exception_handlers);
  }
}


void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
  // record the safepoint before recording the debug info for enclosing scopes
  recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
  _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
  recorder->end_safepoint(pc_offset);
}


void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
  assert(_oop_map != NULL, "oop map must already exist");
  assert(opr->is_single_cpu(), "should not call otherwise");

  VMReg name = frame_map()->regname(opr);
  _oop_map->set_oop(name);
}




// Implementation of IR

IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
    _locals_size(in_WordSize(-1))
  , _num_loops(0) {
  // setup IR fields
  _compilation = compilation;
  _top_scope   = new IRScope(compilation, NULL, -1, method, osr_bci, true);
  _code        = NULL;
}


void IR::optimize_blocks() {
  Optimizer opt(this);
  if (!compilation()->profile_branches()) {
    if (DoCEE) {
      opt.eliminate_conditional_expressions();
#ifndef PRODUCT
      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
      if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
#endif
    }
    if (EliminateBlocks) {
      opt.eliminate_blocks();
#ifndef PRODUCT
      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
      if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
#endif
    }
  }
}

void IR::eliminate_null_checks() {
  Optimizer opt(this);
  if (EliminateNullChecks) {
    opt.eliminate_null_checks();
#ifndef PRODUCT
    if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after null check elimination"); print(true); }
    if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after null check elimination"); print(false); }
#endif
  }
}


static int sort_pairs(BlockPair** a, BlockPair** b) {
  if ((*a)->from() == (*b)->from()) {
    return (*a)->to()->block_id() - (*b)->to()->block_id();
  } else {
    return (*a)->from()->block_id() - (*b)->from()->block_id();
  }
}


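// A critical edge is an edge from a block with multiple successors to a block
// with multiple predecessors. The finder below collects all such (from, to)
// pairs during a preorder walk of the CFG; split_edges() then inserts a new,
// initially empty block on each such edge so that later phases can place code
// on that edge without affecting any other path through the CFG.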
class CriticalEdgeFinder: public BlockClosure {
  BlockPairList blocks;
  IR*       _ir;

 public:
  CriticalEdgeFinder(IR* ir): _ir(ir) {}
  void block_do(BlockBegin* bb) {
    BlockEnd* be = bb->end();
    int nos = be->number_of_sux();
    if (nos >= 2) {
      for (int i = 0; i < nos; i++) {
        BlockBegin* sux = be->sux_at(i);
        if (sux->number_of_preds() >= 2) {
          blocks.append(new BlockPair(bb, sux));
        }
      }
    }
  }

  void split_edges() {
    BlockPair* last_pair = NULL;
    blocks.sort(sort_pairs);
    for (int i = 0; i < blocks.length(); i++) {
      BlockPair* pair = blocks.at(i);
      if (last_pair != NULL && pair->is_same(last_pair)) continue;
      BlockBegin* from = pair->from();
      BlockBegin* to = pair->to();
      BlockBegin* split = from->insert_block_between(to);
#ifndef PRODUCT
      if ((PrintIR || PrintIR1) && Verbose) {
        tty->print_cr("Split critical edge B%d -> B%d (new block B%d)",
                      from->block_id(), to->block_id(), split->block_id());
      }
#endif
      last_pair = pair;
    }
  }
};

void IR::split_critical_edges() {
  CriticalEdgeFinder cef(this);

  iterate_preorder(&cef);
  cef.split_edges();
}


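// UseCountComputer computes the _use_count of every instruction. It iterates
// over all blocks (in reverse linear-scan order) and, starting from the pinned
// instructions of each block, recursively visits the inputs of each value.
// Recursion depth is bounded by max_recurse_depth; values that would be
// reached deeper are pushed onto a worklist, pinned, and later processed as
// roots of their own expression trees.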
class UseCountComputer: public ValueVisitor, BlockClosure {
 private:
  void visit(Value* n) {
    // Local instructions and Phis for expression stack values at the
    // start of basic blocks are not added to the instruction list
    if (!(*n)->is_linked() && (*n)->can_be_linked()) {
      assert(false, "a node was not appended to the graph");
      Compilation::current()->bailout("a node was not appended to the graph");
    }
    // use n's input if not visited before
    if (!(*n)->is_pinned() && !(*n)->has_uses()) {
      // note: a) if the instruction is pinned, it will be handled by compute_use_count
      //       b) if the instruction has uses, it was touched before
      //       => in both cases we don't need to update n's values
      uses_do(n);
    }
    // use n
    (*n)->_use_count++;
  }

  Values* worklist;
  int depth;
  enum {
    max_recurse_depth = 20
  };

  void uses_do(Value* n) {
    depth++;
    if (depth > max_recurse_depth) {
      // don't allow the traversal to recurse too deeply
      worklist->push(*n);
    } else {
      (*n)->input_values_do(this);
      // special handling for some instructions
      if ((*n)->as_BlockEnd() != NULL) {
        // note on BlockEnd:
        //   must 'use' the stack only if the method doesn't
        //   terminate, however, in those cases stack is empty
        (*n)->state_values_do(this);
      }
    }
    depth--;
  }

  void block_do(BlockBegin* b) {
    depth = 0;
    // process all pinned nodes as the roots of expression trees
    for (Instruction* n = b; n != NULL; n = n->next()) {
      if (n->is_pinned()) uses_do(&n);
    }
    assert(depth == 0, "should have counted back down");

    // now process any unpinned nodes which recursed too deeply
    while (worklist->length() > 0) {
      Value t = worklist->pop();
      if (!t->is_pinned()) {
        // compute the use count
        uses_do(&t);

        // pin the instruction so that LIRGenerator doesn't recurse
        // too deeply during its evaluation.
        t->pin();
      }
    }
    assert(depth == 0, "should have counted back down");
  }

  UseCountComputer() {
    worklist = new Values();
    depth = 0;
  }

 public:
  static void compute(BlockList* blocks) {
    UseCountComputer ucc;
    blocks->iterate_backward(&ucc);
  }
};


// helper macro for short definition of trace-output inside code
#ifndef PRODUCT
  #define TRACE_LINEAR_SCAN(level, code)       \
    if (TraceLinearScanLevel >= level) {       \
      code;                                    \
    }
#else
  #define TRACE_LINEAR_SCAN(level, code)
#endif

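// ComputeLinearScanOrder computes the block order used by LinearScan (which is
// currently also the code emission order). The main steps, driven from the
// constructor, are:
//   1. count_edges:           depth-first traversal that counts blocks and incoming
//                             forward branches and detects loop headers and loop ends
//   2. mark_loops:            marks all blocks belonging to each natural loop
//   3. clear_non_natural_loops / assign_loop_depth: loop cleanup and loop-depth assignment
//   4. compute_order:         appends blocks in priority order (see compute_weight)
//   5. compute_dominators:    completes the dominator information, iteratively if required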
class ComputeLinearScanOrder : public StackObj {
 private:
  int        _max_block_id;        // the highest block_id of a block
  int        _num_blocks;          // total number of blocks (smaller than _max_block_id)
  int        _num_loops;           // total number of loops
  bool       _iterative_dominators;// method requires iterative computation of dominators

  BlockList* _linear_scan_order;   // the resulting list of blocks in correct order

  BitMap     _visited_blocks;      // used for recursive processing of blocks
  BitMap     _active_blocks;       // used for recursive processing of blocks
  BitMap     _dominator_blocks;    // temporary BitMap used for computation of dominators
  intArray   _forward_branches;    // number of incoming forward branches for each block
  BlockList  _loop_end_blocks;     // list of all loop end blocks collected during count_edges
  BitMap2D   _loop_map;            // two-dimensional bit set: a bit is set if a block is contained in a loop
  BlockList  _work_list;           // temporary list (used in mark_loops and compute_order)
  BlockList  _loop_headers;

  Compilation* _compilation;

  // accessors for _visited_blocks and _active_blocks
  void init_visited()                     { _active_blocks.clear(); _visited_blocks.clear(); }
  bool is_visited(BlockBegin* b) const    { return _visited_blocks.at(b->block_id()); }
  bool is_active(BlockBegin* b) const     { return _active_blocks.at(b->block_id()); }
  void set_visited(BlockBegin* b)         { assert(!is_visited(b), "already set"); _visited_blocks.set_bit(b->block_id()); }
  void set_active(BlockBegin* b)          { assert(!is_active(b), "already set");  _active_blocks.set_bit(b->block_id()); }
  void clear_active(BlockBegin* b)        { assert(is_active(b), "not already");   _active_blocks.clear_bit(b->block_id()); }

  // accessors for _forward_branches
  void inc_forward_branches(BlockBegin* b) { _forward_branches.at_put(b->block_id(), _forward_branches.at(b->block_id()) + 1); }
  int  dec_forward_branches(BlockBegin* b) { _forward_branches.at_put(b->block_id(), _forward_branches.at(b->block_id()) - 1); return _forward_branches.at(b->block_id()); }

  // accessors for _loop_map
  bool is_block_in_loop   (int loop_idx, BlockBegin* b) const { return _loop_map.at(loop_idx, b->block_id()); }
  void set_block_in_loop  (int loop_idx, BlockBegin* b)       { _loop_map.set_bit(loop_idx, b->block_id()); }
  void clear_block_in_loop(int loop_idx, int block_id)        { _loop_map.clear_bit(loop_idx, block_id); }

  // count edges between blocks
  void count_edges(BlockBegin* cur, BlockBegin* parent);

  // loop detection
  void mark_loops();
  void clear_non_natural_loops(BlockBegin* start_block);
  void assign_loop_depth(BlockBegin* start_block);

  // computation of final block order
  BlockBegin* common_dominator(BlockBegin* a, BlockBegin* b);
  void compute_dominator(BlockBegin* cur, BlockBegin* parent);
  int  compute_weight(BlockBegin* cur);
  bool ready_for_processing(BlockBegin* cur);
  void sort_into_work_list(BlockBegin* b);
  void append_block(BlockBegin* cur);
  void compute_order(BlockBegin* start_block);

  // fixup of dominators for non-natural loops
  bool compute_dominators_iter();
  void compute_dominators();

  // debug functions
  NOT_PRODUCT(void print_blocks();)
  DEBUG_ONLY(void verify();)

  Compilation* compilation() const { return _compilation; }
 public:
  ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);

  // accessors for final result
  BlockList* linear_scan_order() const    { return _linear_scan_order; }
  int        num_loops() const            { return _num_loops; }
};


ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
  _max_block_id(BlockBegin::number_of_blocks()),
  _num_blocks(0),
  _num_loops(0),
  _iterative_dominators(false),
  _visited_blocks(_max_block_id),
  _active_blocks(_max_block_id),
  _dominator_blocks(_max_block_id),
  _forward_branches(_max_block_id, 0),
  _loop_end_blocks(8),
  _work_list(8),
  _linear_scan_order(NULL), // initialized later with correct size
  _loop_map(0, 0),          // initialized later with correct size
  _compilation(c)
{
  TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));

  init_visited();
  count_edges(start_block, NULL);

  if (compilation()->is_profiling()) {
    ciMethod *method = compilation()->method();
    if (!method->is_accessor()) {
      ciMethodData* md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      md->set_compilation_stats(_num_loops, _num_blocks);
    }
  }

  if (_num_loops > 0) {
    mark_loops();
    clear_non_natural_loops(start_block);
    assign_loop_depth(start_block);
  }

  compute_order(start_block);
  compute_dominators();

  NOT_PRODUCT(print_blocks());
  DEBUG_ONLY(verify());
}


// Traverse the CFG:
// * count total number of blocks
// * count all incoming edges and backward incoming edges
// * number loop header blocks
// * create a list with all loop end blocks
void ComputeLinearScanOrder::count_edges(BlockBegin* cur, BlockBegin* parent) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("Enter count_edges for block B%d coming from B%d", cur->block_id(), parent != NULL ? parent->block_id() : -1));
  assert(cur->dominator() == NULL, "dominator already initialized");

  if (is_active(cur)) {
    TRACE_LINEAR_SCAN(3, tty->print_cr("backward branch"));
    assert(is_visited(cur), "block must be visited when block is active");
    assert(parent != NULL, "must have parent");

    cur->set(BlockBegin::linear_scan_loop_header_flag);
    cur->set(BlockBegin::backward_branch_target_flag);

    parent->set(BlockBegin::linear_scan_loop_end_flag);

    // When a loop header is also the start of an exception handler, then the backward branch is
    // an exception edge. Because such edges are usually critical edges which cannot be split, the
    // loop must be excluded here from processing.
    if (cur->is_set(BlockBegin::exception_entry_flag)) {
      // Make sure that dominators are correct in this weird situation
      _iterative_dominators = true;
      return;
    }
    assert(parent->number_of_sux() == 1 && parent->sux_at(0) == cur,
           "loop end blocks must have one successor (critical edges are split)");

    _loop_end_blocks.append(parent);
    return;
  }

  // increment number of incoming forward branches
  inc_forward_branches(cur);

  if (is_visited(cur)) {
    TRACE_LINEAR_SCAN(3, tty->print_cr("block already visited"));
    return;
  }

  _num_blocks++;
  set_visited(cur);
  set_active(cur);

  // recursive call for all successors
  int i;
  for (i = cur->number_of_sux() - 1; i >= 0; i--) {
    count_edges(cur->sux_at(i), cur);
  }
  for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
    count_edges(cur->exception_handler_at(i), cur);
  }

  clear_active(cur);

  // Each loop has a unique number.
  // When multiple loops are nested, assign_loop_depth assumes that the
  // innermost loop has the lowest number. This is guaranteed by setting
  // the loop number after the recursive calls for the successors above
  // have returned.
  if (cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
    assert(cur->loop_index() == -1, "cannot set loop-index twice");
    TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));

    cur->set_loop_index(_num_loops);
    _loop_headers.append(cur);
    _num_loops++;
  }

  TRACE_LINEAR_SCAN(3, tty->print_cr("Finished count_edges for block B%d", cur->block_id()));
}


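// For every loop end block collected by count_edges, walk backwards over the
// predecessors until the loop header is reached, and record each block visited
// on the way in _loop_map as belonging to that loop.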
void ComputeLinearScanOrder::mark_loops() {
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- marking loops"));

  _loop_map = BitMap2D(_num_loops, _max_block_id);
  _loop_map.clear();

  for (int i = _loop_end_blocks.length() - 1; i >= 0; i--) {
    BlockBegin* loop_end   = _loop_end_blocks.at(i);
    BlockBegin* loop_start = loop_end->sux_at(0);
    int         loop_idx   = loop_start->loop_index();

    TRACE_LINEAR_SCAN(3, tty->print_cr("Processing loop from B%d to B%d (loop %d):", loop_start->block_id(), loop_end->block_id(), loop_idx));
    assert(loop_end->is_set(BlockBegin::linear_scan_loop_end_flag), "loop end flag must be set");
    assert(loop_end->number_of_sux() == 1, "incorrect number of successors");
    assert(loop_start->is_set(BlockBegin::linear_scan_loop_header_flag), "loop header flag must be set");
    assert(loop_idx >= 0 && loop_idx < _num_loops, "loop index not set");
    assert(_work_list.is_empty(), "work list must be empty before processing");

    // add the end-block of the loop to the working list
    _work_list.push(loop_end);
    set_block_in_loop(loop_idx, loop_end);
    do {
      BlockBegin* cur = _work_list.pop();

      TRACE_LINEAR_SCAN(3, tty->print_cr("    processing B%d", cur->block_id()));
      assert(is_block_in_loop(loop_idx, cur), "bit in loop map must be set when block is in work list");

      // recursive processing of all predecessors ends when start block of loop is reached
      if (cur != loop_start && !cur->is_set(BlockBegin::osr_entry_flag)) {
        for (int j = cur->number_of_preds() - 1; j >= 0; j--) {
          BlockBegin* pred = cur->pred_at(j);

          if (!is_block_in_loop(loop_idx, pred) /*&& !pred->is_set(BlockBegin::osr_entry_flag)*/) {
            // this predecessor has not been processed yet, so add it to work list
            TRACE_LINEAR_SCAN(3, tty->print_cr("    pushing B%d", pred->block_id()));
            _work_list.push(pred);
            set_block_in_loop(loop_idx, pred);
          }
        }
      }
    } while (!_work_list.is_empty());
  }
}


// Check for non-natural loops (loops where the loop header does not dominate
// all other loop blocks, i.e. loops with multiple entries).
// Such loops are ignored.
void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
  for (int i = _num_loops - 1; i >= 0; i--) {
    if (is_block_in_loop(i, start_block)) {
      // loop i contains the entry block of the method
      // -> this is not a natural loop, so ignore it
      TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));

      BlockBegin *loop_header = _loop_headers.at(i);
      assert(loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Must be loop header");

      for (int j = 0; j < loop_header->number_of_preds(); j++) {
        BlockBegin *pred = loop_header->pred_at(j);
        pred->clear(BlockBegin::linear_scan_loop_end_flag);
      }

      loop_header->clear(BlockBegin::linear_scan_loop_header_flag);

      for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
        clear_block_in_loop(i, block_id);
      }
      _iterative_dominators = true;
    }
  }
}

void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight"));
  init_visited();

  assert(_work_list.is_empty(), "work list must be empty before processing");
  _work_list.append(start_block);

  do {
    BlockBegin* cur = _work_list.pop();

    if (!is_visited(cur)) {
      set_visited(cur);
      TRACE_LINEAR_SCAN(4, tty->print_cr("Computing loop depth for block B%d", cur->block_id()));

      // compute loop-depth and loop-index for the block
      assert(cur->loop_depth() == 0, "cannot set loop-depth twice");
      int i;
      int loop_depth = 0;
      int min_loop_idx = -1;
      for (i = _num_loops - 1; i >= 0; i--) {
        if (is_block_in_loop(i, cur)) {
          loop_depth++;
          min_loop_idx = i;
        }
      }
      cur->set_loop_depth(loop_depth);
      cur->set_loop_index(min_loop_idx);

      // append all unvisited successors to work list
      for (i = cur->number_of_sux() - 1; i >= 0; i--) {
        _work_list.append(cur->sux_at(i));
      }
      for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
        _work_list.append(cur->exception_handler_at(i));
      }
    }
  } while (!_work_list.is_empty());
}


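// Compute the common dominator of a and b: mark all blocks on a's dominator
// chain in _dominator_blocks, then walk up b's dominator chain until a marked
// block is found. That block dominates both a and b.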
BlockBegin* ComputeLinearScanOrder::common_dominator(BlockBegin* a, BlockBegin* b) {
  assert(a != NULL && b != NULL, "must have input blocks");

  _dominator_blocks.clear();
  while (a != NULL) {
    _dominator_blocks.set_bit(a->block_id());
    assert(a->dominator() != NULL || a == _linear_scan_order->at(0), "dominator must be initialized");
    a = a->dominator();
  }
  while (b != NULL && !_dominator_blocks.at(b->block_id())) {
    assert(b->dominator() != NULL || b == _linear_scan_order->at(0), "dominator must be initialized");
    b = b->dominator();
  }

  assert(b != NULL, "could not find dominator");
  return b;
}

void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* parent) {
  if (cur->dominator() == NULL) {
    TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: initializing dominator of B%d to B%d", cur->block_id(), parent->block_id()));
    cur->set_dominator(parent);

  } else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
    TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
    // Does not hold for exception blocks
    assert(cur->number_of_preds() > 1 || cur->is_set(BlockBegin::exception_entry_flag), "");
    cur->set_dominator(common_dominator(cur->dominator(), parent));
  }

  // Additional edge to the exception handlers of all our successors:
  // range check elimination requires that the state at the end of a
  // block be valid in every block it dominates, so cur must dominate
  // the exception handlers of its successors.
  int num_cur_xhandler = cur->number_of_exception_handlers();
  for (int j = 0; j < num_cur_xhandler; j++) {
    BlockBegin* xhandler = cur->exception_handler_at(j);
    compute_dominator(xhandler, parent);
  }
}


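// The weight of a block encodes its placement priority: the loop depth occupies
// bits 30..16, six heuristic flags (one per INC_WEIGHT_IF below, in decreasing
// priority) occupy bits 15..10, and bit 0 is always set so the weight is > 0.
// Blocks with a higher weight are taken out of the work list first and
// therefore appear earlier in the linear-scan order.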
int ComputeLinearScanOrder::compute_weight(BlockBegin* cur) {
  BlockBegin* single_sux = NULL;
  if (cur->number_of_sux() == 1) {
    single_sux = cur->sux_at(0);
  }

  // limit loop-depth to 15 bits (only as a safeguard; it will never be that big)
  int weight = (cur->loop_depth() & 0x7FFF) << 16;

  // general macro for short definition of weight flags
  // the first instance of INC_WEIGHT_IF has the highest priority
  int cur_bit = 15;
  #define INC_WEIGHT_IF(condition) if ((condition)) { weight |= (1 << cur_bit); } cur_bit--;

  // this is necessary for the (very rare) case that two successive blocks have
  // the same loop depth, but a different loop index (can happen for endless loops
  // with exception handlers)
  INC_WEIGHT_IF(!cur->is_set(BlockBegin::linear_scan_loop_header_flag));

  // loop end blocks (blocks that end with a backward branch) are added
  // after all other blocks of the loop.
  INC_WEIGHT_IF(!cur->is_set(BlockBegin::linear_scan_loop_end_flag));

  // critical edge split blocks are preferred because they then have a higher
  // probability of being completely empty
  INC_WEIGHT_IF(cur->is_set(BlockBegin::critical_edge_split_flag));

  // exceptions should not be thrown in normal control flow, so these blocks
  // are added as late as possible
  INC_WEIGHT_IF(cur->end()->as_Throw() == NULL  && (single_sux == NULL || single_sux->end()->as_Throw()  == NULL));
  INC_WEIGHT_IF(cur->end()->as_Return() == NULL && (single_sux == NULL || single_sux->end()->as_Return() == NULL));

  // exception handlers are added as late as possible
  INC_WEIGHT_IF(!cur->is_set(BlockBegin::exception_entry_flag));

  // guarantee that weight is > 0
  weight |= 1;

  #undef INC_WEIGHT_IF
  assert(cur_bit >= 0, "too many flags");
  assert(weight > 0, "weight cannot become negative");

  return weight;
}

bool ComputeLinearScanOrder::ready_for_processing(BlockBegin* cur) {
  // Discount the edge just traveled.
  // When the number drops to zero, all forward branches were processed
  if (dec_forward_branches(cur) != 0) {
    return false;
  }

  assert(_linear_scan_order->index_of(cur) == -1, "block already processed (block can be ready only once)");
  assert(_work_list.index_of(cur) == -1, "block already in work-list (block can be ready only once)");
  return true;
}

void ComputeLinearScanOrder::sort_into_work_list(BlockBegin* cur) {
  assert(_work_list.index_of(cur) == -1, "block already in work list");

  int cur_weight = compute_weight(cur);

  // the linear_scan_number is used to cache the weight of a block
  cur->set_linear_scan_number(cur_weight);

#ifndef PRODUCT
  if (StressLinearScan) {
    _work_list.insert_before(0, cur);
    return;
  }
#endif

  _work_list.append(NULL); // provide space for new element

  int insert_idx = _work_list.length() - 1;
  while (insert_idx > 0 && _work_list.at(insert_idx - 1)->linear_scan_number() > cur_weight) {
    _work_list.at_put(insert_idx, _work_list.at(insert_idx - 1));
    insert_idx--;
  }
  _work_list.at_put(insert_idx, cur);

  TRACE_LINEAR_SCAN(3, tty->print_cr("Sorted B%d into worklist. new worklist:", cur->block_id()));
  TRACE_LINEAR_SCAN(3, for (int i = 0; i < _work_list.length(); i++) tty->print_cr("%8d B%2d  weight:%6x", i, _work_list.at(i)->block_id(), _work_list.at(i)->linear_scan_number()));

#ifdef ASSERT
  for (int i = 0; i < _work_list.length(); i++) {
    assert(_work_list.at(i)->linear_scan_number() > 0, "weight not set");
    assert(i == 0 || _work_list.at(i - 1)->linear_scan_number() <= _work_list.at(i)->linear_scan_number(), "incorrect order in worklist");
  }
#endif
}

void ComputeLinearScanOrder::append_block(BlockBegin* cur) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("appending block B%d (weight 0x%6x) to linear-scan order", cur->block_id(), cur->linear_scan_number()));
  assert(_linear_scan_order->index_of(cur) == -1, "cannot add the same block twice");

  // currently, the linear scan order and code emit order are equal.
  // therefore the linear_scan_number and the weight of a block must also
  // be equal.
  cur->set_linear_scan_number(_linear_scan_order->length());
  _linear_scan_order->append(cur);
}

void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order"));

  // the start block is always the first block in the linear scan order
  _linear_scan_order = new BlockList(_num_blocks);
  append_block(start_block);

  assert(start_block->end()->as_Base() != NULL, "start block must end with Base-instruction");
  BlockBegin* std_entry = ((Base*)start_block->end())->std_entry();
  BlockBegin* osr_entry = ((Base*)start_block->end())->osr_entry();

  BlockBegin* sux_of_osr_entry = NULL;
  if (osr_entry != NULL) {
    // special handling for osr entry:
    // ignore the edge between the osr entry and its successor for processing
    // the osr entry block is added manually below
    assert(osr_entry->number_of_sux() == 1, "osr entry must have exactly one successor");
    assert(osr_entry->sux_at(0)->number_of_preds() >= 2, "successor of osr entry must have two predecessors (otherwise it is not present in normal control flow)");

    sux_of_osr_entry = osr_entry->sux_at(0);
    dec_forward_branches(sux_of_osr_entry);

    compute_dominator(osr_entry, start_block);
    _iterative_dominators = true;
  }
  compute_dominator(std_entry, start_block);

  // start processing with standard entry block
  assert(_work_list.is_empty(), "list must be empty before processing");

  if (ready_for_processing(std_entry)) {
    sort_into_work_list(std_entry);
  } else {
    assert(false, "the std_entry must be ready for processing (otherwise, the method has no start block)");
  }

  do {
    BlockBegin* cur = _work_list.pop();

    if (cur == sux_of_osr_entry) {
      // the osr entry block is ignored in normal processing, it is never added to the
      // work list. Instead, it is added as late as possible manually here.
      append_block(osr_entry);
      compute_dominator(cur, osr_entry);
    }
    append_block(cur);

    int i;
    int num_sux = cur->number_of_sux();
    // changed loop order to get "intuitive" order of if- and else-blocks
    for (i = 0; i < num_sux; i++) {
      BlockBegin* sux = cur->sux_at(i);
      compute_dominator(sux, cur);
      if (ready_for_processing(sux)) {
        sort_into_work_list(sux);
      }
    }
    num_sux = cur->number_of_exception_handlers();
    for (i = 0; i < num_sux; i++) {
      BlockBegin* sux = cur->exception_handler_at(i);
      if (ready_for_processing(sux)) {
        sort_into_work_list(sux);
      }
    }
  } while (_work_list.length() > 0);
}


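// One pass of the fix-point dominator computation: recompute the dominator of
// every block (except the start block) as the common dominator of all its
// predecessors. Returns whether any dominator changed, so the caller can
// iterate until a fix point is reached.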
bool ComputeLinearScanOrder::compute_dominators_iter() {
  bool changed = false;
  int num_blocks = _linear_scan_order->length();

  assert(_linear_scan_order->at(0)->dominator() == NULL, "must not have dominator");
  assert(_linear_scan_order->at(0)->number_of_preds() == 0, "must not have predecessors");
  for (int i = 1; i < num_blocks; i++) {
    BlockBegin* block = _linear_scan_order->at(i);

    BlockBegin* dominator = block->pred_at(0);
    int num_preds = block->number_of_preds();

    TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: Processing B%d", block->block_id()));

    for (int j = 0; j < num_preds; j++) {

      BlockBegin *pred = block->pred_at(j);
      TRACE_LINEAR_SCAN(4, tty->print_cr("   DOM: Subprocessing B%d", pred->block_id()));

      if (block->is_set(BlockBegin::exception_entry_flag)) {
        dominator = common_dominator(dominator, pred);
        int num_pred_preds = pred->number_of_preds();
        for (int k = 0; k < num_pred_preds; k++) {
          dominator = common_dominator(dominator, pred->pred_at(k));
        }
      } else {
        dominator = common_dominator(dominator, pred);
      }
    }

    if (dominator != block->dominator()) {
      TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: updating dominator of B%d from B%d to B%d", block->block_id(), block->dominator()->block_id(), dominator->block_id()));

      block->set_dominator(dominator);
      changed = true;
    }
  }
  return changed;
}

void ComputeLinearScanOrder::compute_dominators() {
  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing dominators (iterative computation required: %d)", _iterative_dominators));

  // iterative computation of dominators is only required for methods with non-natural loops
  // and OSR-methods. For all other methods, the dominators computed when generating the
  // linear scan block order are correct.
  if (_iterative_dominators) {
    do {
      TRACE_LINEAR_SCAN(1, tty->print_cr("DOM: next iteration of fix-point calculation"));
    } while (compute_dominators_iter());
  }

  // check that dominators are correct
  assert(!compute_dominators_iter(), "fix point not reached");

  // Add Blocks to dominates-Array
  int num_blocks = _linear_scan_order->length();
  for (int i = 0; i < num_blocks; i++) {
    BlockBegin* block = _linear_scan_order->at(i);

    BlockBegin *dom = block->dominator();
    if (dom) {
      assert(dom->dominator_depth() != -1, "Dominator must have been visited before");
      dom->dominates()->append(block);
      block->set_dominator_depth(dom->dominator_depth() + 1);
    } else {
      block->set_dominator_depth(0);
    }
  }
}


#ifndef PRODUCT
void ComputeLinearScanOrder::print_blocks() {
  if (TraceLinearScanLevel >= 2) {
    tty->print_cr("----- loop information:");
    for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
      BlockBegin* cur = _linear_scan_order->at(block_idx);

      tty->print("%4d: B%2d: ", cur->linear_scan_number(), cur->block_id());
      for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
        tty->print ("%d ", is_block_in_loop(loop_idx, cur));
      }
      tty->print_cr(" -> loop_index: %2d, loop_depth: %2d", cur->loop_index(), cur->loop_depth());
    }
  }

  if (TraceLinearScanLevel >= 1) {
    tty->print_cr("----- linear-scan block order:");
    for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
      BlockBegin* cur = _linear_scan_order->at(block_idx);
      tty->print("%4d: B%2d    loop: %2d  depth: %2d", cur->linear_scan_number(), cur->block_id(), cur->loop_index(), cur->loop_depth());

      tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
      tty->print(cur->is_set(BlockBegin::critical_edge_split_flag)     ? " ce" : "   ");
      tty->print(cur->is_set(BlockBegin::linear_scan_loop_header_flag) ? " lh" : "   ");
      tty->print(cur->is_set(BlockBegin::linear_scan_loop_end_flag)    ? " le" : "   ");

      if (cur->dominator() != NULL) {
        tty->print("    dom: B%d ", cur->dominator()->block_id());
      } else {
        tty->print("    dom: NULL ");
      }

      if (cur->number_of_preds() > 0) {
        tty->print("    preds: ");
        for (int j = 0; j < cur->number_of_preds(); j++) {
          BlockBegin* pred = cur->pred_at(j);
          tty->print("B%d ", pred->block_id());
        }
      }
      if (cur->number_of_sux() > 0) {
        tty->print("    sux: ");
        for (int j = 0; j < cur->number_of_sux(); j++) {
          BlockBegin* sux = cur->sux_at(j);
          tty->print("B%d ", sux->block_id());
        }
      }
      if (cur->number_of_exception_handlers() > 0) {
        tty->print("    ex: ");
        for (int j = 0; j < cur->number_of_exception_handlers(); j++) {
          BlockBegin* ex = cur->exception_handler_at(j);
          tty->print("B%d ", ex->block_id());
        }
      }
      tty->cr();
    }
  }
}
#endif

#ifdef ASSERT
void ComputeLinearScanOrder::verify() {
  assert(_linear_scan_order->length() == _num_blocks, "wrong number of blocks in list");

  if (StressLinearScan) {
    // blocks are scrambled when StressLinearScan is used
    return;
  }

  // check that all successors of a block have a higher linear-scan-number
  // and that all predecessors of a block have a lower linear-scan-number
  // (only backward branches of loops are ignored)
  int i;
  for (i = 0; i < _linear_scan_order->length(); i++) {
    BlockBegin* cur = _linear_scan_order->at(i);

    assert(cur->linear_scan_number() == i, "incorrect linear_scan_number");
    assert(cur->linear_scan_number() >= 0 && cur->linear_scan_number() == _linear_scan_order->index_of(cur), "incorrect linear_scan_number");

    int j;
    for (j = cur->number_of_sux() - 1; j >= 0; j--) {
      BlockBegin* sux = cur->sux_at(j);

      assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
      if (!sux->is_set(BlockBegin::backward_branch_target_flag)) {
        assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
      }
      if (cur->loop_depth() == sux->loop_depth()) {
        assert(cur->loop_index() == sux->loop_index() || sux->is_set(BlockBegin::linear_scan_loop_header_flag), "successive blocks with same loop depth must have same loop index");
      }
    }

    for (j = cur->number_of_preds() - 1; j >= 0; j--) {
      BlockBegin* pred = cur->pred_at(j);

      assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
      if (!cur->is_set(BlockBegin::backward_branch_target_flag)) {
        assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
      }
      if (cur->loop_depth() == pred->loop_depth()) {
        assert(cur->loop_index() == pred->loop_index() || cur->is_set(BlockBegin::linear_scan_loop_header_flag), "successive blocks with same loop depth must have same loop index");
      }

      assert(cur->dominator()->linear_scan_number() <= cur->pred_at(j)->linear_scan_number(), "dominator must be before predecessors");
    }

    // check dominator
    if (i == 0) {
      assert(cur->dominator() == NULL, "first block has no dominator");
    } else {
      assert(cur->dominator() != NULL, "all but first block must have dominator");
    }
    // Assertion does not hold for exception handlers
    assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0) || cur->is_set(BlockBegin::exception_entry_flag), "Single predecessor must also be dominator");
  }

  // check that all loops are continuous
  for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
    int block_idx = 0;
    assert(!is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx)), "the first block must not be present in any loop");

    // skip blocks before the loop
    while (block_idx < _num_blocks && !is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx))) {
      block_idx++;
    }
    // skip blocks of loop
    while (block_idx < _num_blocks && is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx))) {
      block_idx++;
    }
    // after the first non-loop block, there must not be another loop-block
    while (block_idx < _num_blocks) {
      assert(!is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx)), "loop not continuous in linear-scan order");
      block_idx++;
    }
  }
}
#endif


void IR::compute_code() {
  assert(is_valid(), "IR must be valid");

  ComputeLinearScanOrder compute_order(compilation(), start());
  _num_loops = compute_order.num_loops();
  _code = compute_order.linear_scan_order();
}


void IR::compute_use_counts() {
  // make sure all values coming out of this block get evaluated.
  int num_blocks = _code->length();
  for (int i = 0; i < num_blocks; i++) {
    _code->at(i)->end()->state()->pin_stack_for_linear_scan();
  }

  // compute use counts
  UseCountComputer::compute(_code);
}


void IR::iterate_preorder(BlockClosure* closure) {
  assert(is_valid(), "IR must be valid");
  start()->iterate_preorder(closure);
}


void IR::iterate_postorder(BlockClosure* closure) {
  assert(is_valid(), "IR must be valid");
  start()->iterate_postorder(closure);
}

void IR::iterate_linear_scan_order(BlockClosure* closure) {
  linear_scan_order()->iterate_forward(closure);
}


#ifndef PRODUCT
class BlockPrinter: public BlockClosure {
 private:
  InstructionPrinter* _ip;
  bool                _cfg_only;
  bool                _live_only;

 public:
  BlockPrinter(InstructionPrinter* ip, bool cfg_only, bool live_only = false) {
    _ip       = ip;
    _cfg_only = cfg_only;
    _live_only = live_only;
  }

  virtual void block_do(BlockBegin* block) {
    if (_cfg_only) {
      _ip->print_instr(block); tty->cr();
    } else {
      block->print_block(*_ip, _live_only);
    }
  }
};


void IR::print(BlockBegin* start, bool cfg_only, bool live_only) {
  ttyLocker ttyl;
  InstructionPrinter ip(!cfg_only);
  BlockPrinter bp(&ip, cfg_only, live_only);
  start->iterate_preorder(&bp);
  tty->cr();
}

void IR::print(bool cfg_only, bool live_only) {
  if (is_valid()) {
    print(start(), cfg_only, live_only);
  } else {
    tty->print_cr("invalid IR");
  }
}


define_array(BlockListArray, BlockList*)
define_stack(BlockListList, BlockListArray)

class PredecessorValidator : public BlockClosure {
 private:
  BlockListList* _predecessors;
  BlockList*     _blocks;

  static int cmp(BlockBegin** a, BlockBegin** b) {
    return (*a)->block_id() - (*b)->block_id();
  }

 public:
  PredecessorValidator(IR* hir) {
    ResourceMark rm;
    _predecessors = new BlockListList(BlockBegin::number_of_blocks(), NULL);
    _blocks = new BlockList();

    int i;
    hir->start()->iterate_preorder(this);
    if (hir->code() != NULL) {
      assert(hir->code()->length() == _blocks->length(), "must match");
      for (i = 0; i < _blocks->length(); i++) {
        assert(hir->code()->contains(_blocks->at(i)), "should be in both lists");
      }
    }

    for (i = 0; i < _blocks->length(); i++) {
      BlockBegin* block = _blocks->at(i);
      BlockList* preds = _predecessors->at(block->block_id());
      if (preds == NULL) {
        assert(block->number_of_preds() == 0, "should be the same");
        continue;
      }

      // clone the pred list so we can mutate it
      BlockList* pred_copy = new BlockList();
      int j;
      for (j = 0; j < block->number_of_preds(); j++) {
        pred_copy->append(block->pred_at(j));
      }
      // sort them in the same order
      preds->sort(cmp);
      pred_copy->sort(cmp);
      int length = MIN2(preds->length(), block->number_of_preds());
      for (j = 0; j < block->number_of_preds(); j++) {
        assert(preds->at(j) == pred_copy->at(j), "must match");
      }

      assert(preds->length() == block->number_of_preds(), "should be the same");
    }
  }

  virtual void block_do(BlockBegin* block) {
    _blocks->append(block);
    BlockEnd* be = block->end();
    int n = be->number_of_sux();
    int i;
    for (i = 0; i < n; i++) {
      BlockBegin* sux = be->sux_at(i);
      assert(!sux->is_set(BlockBegin::exception_entry_flag), "must not be xhandler");

      BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
      if (preds == NULL) {
        preds = new BlockList();
        _predecessors->at_put(sux->block_id(), preds);
      }
      preds->append(block);
    }

    n = block->number_of_exception_handlers();
    for (i = 0; i < n; i++) {
      BlockBegin* sux = block->exception_handler_at(i);
      assert(sux->is_set(BlockBegin::exception_entry_flag), "must be xhandler");

      BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
      if (preds == NULL) {
        preds = new BlockList();
        _predecessors->at_put(sux->block_id(), preds);
      }
      preds->append(block);
    }
  }
};

class VerifyBlockBeginField : public BlockClosure {

public:

  virtual void block_do(BlockBegin *block) {
    for (Instruction *cur = block; cur != NULL; cur = cur->next()) {
      assert(cur->block() == block, "Block begin is not correct");
    }
  }
};

void IR::verify() {
#ifdef ASSERT
  PredecessorValidator pv(this);
  VerifyBlockBeginField verifier;
  this->iterate_postorder(&verifier);
#endif
}

#endif // PRODUCT

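// SubstitutionResolver replaces every use of a value that has a substitution
// recorded (Instruction::subst() returns something other than the instruction
// itself) with that substitution, and unlinks substituted instructions from
// the instruction list of their block.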
void SubstitutionResolver::visit(Value* v) {
  Value v0 = *v;
  if (v0) {
    Value vs = v0->subst();
    if (vs != v0) {
      *v = v0->subst();
    }
  }
}

#ifdef ASSERT
class SubstitutionChecker: public ValueVisitor {
  void visit(Value* v) {
    Value v0 = *v;
    if (v0) {
      Value vs = v0->subst();
      assert(vs == v0, "missed substitution");
    }
  }
};
#endif


void SubstitutionResolver::block_do(BlockBegin* block) {
  Instruction* last = NULL;
  for (Instruction* n = block; n != NULL;) {
    n->values_do(this);
    // need to remove this instruction from the instruction stream
    if (n->subst() != n) {
      assert(last != NULL, "must have last");
      last->set_next(n->next());
    } else {
      last = n;
    }
    n = last->next();
  }

#ifdef ASSERT
  SubstitutionChecker check_substitute;
  if (block->state()) block->state()->values_do(&check_substitute);
  block->block_values_do(&check_substitute);
  if (block->end() && block->end()->state()) block->end()->state()->values_do(&check_substitute);
#endif
}