block.hpp revision 5776:de6a9e811145
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_BLOCK_HPP
#define SHARE_VM_OPTO_BLOCK_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  friend class VMStructs;
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
  Arena *_arena;                // Arena to allocate in
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array node to fit

public:
  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
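
// Usage sketch (illustrative only; 'arena' and 'blk' are hypothetical):
//
//   Block_Array bmap(arena);
//   bmap.map(100, blk);            // grows transparently past the initial size
//   Block* b = bmap.lookup(100);   // == blk
//   Block* c = bmap.lookup(9999);  // == NULL: never-mapped indices read as NULL
//
// lookup() is the bounds-safe accessor; operator[] asserts on out-of-bounds
// indices.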


class Block_List : public Block_Array {
  friend class VMStructs;
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) {  map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};
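
// Usage sketch (illustrative only; 'start' is hypothetical): Block_List
// doubles as a worklist stack for CFG traversals.
//
//   Block_List worklist;
//   worklist.push(start);
//   while (worklist.size() > 0) {
//     Block* b = worklist.pop();   // LIFO; rpop() takes from the front instead
//     // ... visit b, push its unvisited successors ...
//   }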


class CFGElement : public ResourceObj {
  friend class VMStructs;
 public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop()  { assert(is_loop(),  "must be loop");  return (CFGLoop*)this;  }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
  friend class VMStructs;

private:
  // Nodes in this block, in order
  Node_List _nodes;

public:

  // Get the node at index 'at_index'; returns NULL if 'at_index' is out of bounds
  Node* get_node(uint at_index) const {
    return _nodes[at_index];
  }

  // Get the number of nodes in this block
  uint number_of_nodes() const {
    return _nodes.size();
  }

  // Map a node 'node' to index 'to_index' in the block; if the index is out
  // of bounds, the node list is grown to fit
  void map_node(Node* node, uint to_index) {
    _nodes.map(to_index, node);
  }

  // Insert a node 'node' at index 'at_index', moving all nodes at higher
  // indices one step up; if 'at_index' is out of bounds we crash
  void insert_node(Node* node, uint at_index) {
    _nodes.insert(at_index, node);
  }

  // Remove the node at index 'at_index'
  void remove_node(uint at_index) {
    _nodes.remove(at_index);
  }

  // Push a node 'node' onto the end of the node list
  void push_node(Node* node) {
    _nodes.push(node);
  }

  // Pop the last node off the node list
  Node* pop_node() {
    return _nodes.pop();
  }

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return get_node(0); }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }
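
  // Illustrative sketch: because num_preds() is ONE based, a predecessor walk
  // starts at 1 and mirrors the input edges of the head RegionNode:
  //
  //   for (uint i = 1; i < b->num_preds(); i++) {  // 'b' is a hypothetical Block*
  //     Node* in = b->pred(i);                     // i-th predecessor control input
  //   }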

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor
  int num_fall_throughs();      // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i); // Is successor "i" a fall-through candidate?
  Block* lone_fall_through();   // Return lone fall-through Block or null

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to rescale such frequencies from the old
  // FreqCountInvocations baseline of 1500 to the current setting.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
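
  // Illustrative arithmetic: with FreqCountInvocations at the old baseline of
  // 1500, BLOCK_FREQUENCY(f) == f; if FreqCountInvocations were 3000, then
  // BLOCK_FREQUENCY(0.5f) == (0.5f * 1500) / 3000 == 0.25f.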

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void    set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const          { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void    set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const       { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if its padding is less than or equal to the padding limit,
  // or if the size of the first instructions in the loop exceeds the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
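
  // Worked example (illustrative): with code_alignment() == 16 and
  // relocInfo::addr_unit() == 1, max_pad == 15.  At current_offset == 58,
  // current_alignment == (58 & 15) == 10 and padding == (16 - 10) & 15 == 6
  // nop bytes.  For a loop head, those 6 bytes are skipped (0 is returned)
  // when 6 > MaxLoopPad and the loop's first instructions fit in 6 bytes.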

  // Connector blocks. Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems. Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks.  If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last  = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
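
  // Illustrative example: for a block whose node list ends with
  // [ ..., IfNode, IfTrue, IfFalse ], _num_succs == 2 and the last node is a
  // projection, so end_idx() returns last_idx - 2, the index of the IfNode.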

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { insert_node(n, end_idx()); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig, outputStream* st = tty) const;
  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
  void dump() const;
  void dump(const PhaseCFG* cfg) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
  friend class VMStructs;
 private:

  // Root of whole program
  RootNode* _root;

  // The block containing the root node
  Block* _root_block;

  // List of basic blocks that are created during CFG creation
  Block_List _blocks;

  // Count of basic blocks
  uint _number_of_blocks;

  // Arena for the blocks to be stored in
  Arena* _block_arena;

  // The matcher for this compilation
  Matcher& _matcher;

  // Map nodes to owning basic block
  Block_Array _node_to_block_mapping;

  // Loop from the root
  CFGLoop* _root_loop;

  // Outermost loop frequency
  float _outer_loop_frequency;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint>* _node_latency;

  // Build a proper looking cfg.  Return count of basic blocks
  uint build_cfg();

  // Build the dominator tree so that we know where we can move instructions
  void build_dominator_tree();

  // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions
  void estimate_block_frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  // Move nodes to ensure correctness from GVN and also try to move nodes out of loops.
  void global_code_motion();

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Compute the instruction global latency with a backwards walk
  void compute_latencies_backwards(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late. Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
  void set_next_call(Block* block, Node* n, VectorSet& next_call);
  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);

  // Perform basic-block local scheduling
  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);

  // Schedule a call next in the block
  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);

  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block* block);

  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);

  // Perform a Depth First Search (DFS).
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint do_DFS(Tarjan* tarjan, uint rpo_counter);

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  void replace_block_proj_ctrl( Node *n );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  // Used when building the CFG and creating end nodes for blocks.
  MachNode* _goto;

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == get_block_for_node(load), "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);

  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  NeverBranch nodes are treated as conditional branches that
  // always go the same direction for most of the optimizer, and are used to
  // give a fake exit path to infinite loops.  At this late stage they need to
  // turn into Gotos so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

  #ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
  #endif

 public:
  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

  void set_latency_for_node(Node* node, int latency) {
    _node_latency->at_put_grow(node->_idx, latency);
  }

  uint get_latency_for_node(Node* node) {
    return _node_latency->at_grow(node->_idx);
  }

  // Get the outermost loop frequency
  float get_outer_loop_frequency() const {
    return _outer_loop_frequency;
  }

  // Get the root node of the CFG
  RootNode* get_root_node() const {
    return _root;
  }

  // Get the block of the root node
  Block* get_root_block() const {
    return _root_block;
  }

  // Add a block at position 'pos', moving the later blocks one step
  void add_block_at(uint pos, Block* block) {
    _blocks.insert(pos, block);
    _number_of_blocks++;
  }

  // Append a block to the end of the block list
  void add_block(Block* block) {
    _blocks.push(block);
    _number_of_blocks++;
  }

  // Clear the list of blocks
  void clear_blocks() {
    _blocks.reset();
    _number_of_blocks = 0;
  }

  // Get the block at position pos in _blocks
  Block* get_block(uint pos) const {
    return _blocks[pos];
  }

  // Number of blocks
  uint number_of_blocks() const {
    return _number_of_blocks;
  }

  // Set which block this node should reside in
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }

  // Remove the mapping from a node to a block
  void unmap_node_from_block(const Node* node) {
    _node_to_block_mapping.map(node->_idx, NULL);
  }

  // Get the block in which this node resides
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }

  // Return true if this node resides in some block
  bool has_block(const Node* node) const {
    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  }
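
  // Usage sketch (illustrative only; 'cfg', 'n' and 'b' are hypothetical):
  //
  //   cfg.map_node_to_block(n, b);
  //   assert(cfg.has_block(n), "mapped");
  //   Block* owner = cfg.get_block_for_node(n);  // == b
  //   cfg.unmap_node_from_block(n);              // has_block(n) is now false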

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon(const Block* block);

#ifdef ASSERT
  Unique_Node_List _raw_oops;
#endif

  // Do global code motion by first building the dominator tree and estimating block frequencies.
  // Returns true on success
  bool do_global_code_motion();

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty_blocks();
  void fixup_flow();

  // Insert a node into a block at index and map the node to the block
  void insert(Block *b, uint idx, Node *n) {
    b->insert_node(n, idx);
    map_node_to_block(n, b);
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited  ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};
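
// Pipeline sketch (illustrative): do_global_code_motion() drives the phases
// declared above, roughly:
//
//   build_dominator_tree();      // dominators bound where code may legally move
//   estimate_block_frequency();  // frequencies rank the candidate blocks
//   global_code_motion();        // schedule_early()/schedule_late() then pick
//                                // the cheapest legal block for each node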


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;  // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );  // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into 16 bits" );
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
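
// Usage sketch (illustrative only; 'max', 'a' and 'b' are hypothetical):
// classic union-find with path compression, keyed by block indices.
//
//   UnionFind uf(max);
//   uf.reset(max);                           // identity map: Find(i) == i
//   uf.Union(a, b);                          // merge the two sets
//   bool same = (uf.Find(a) == uf.Find(b));  // now true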

//----------------------------BlockProbPair---------------------------
// Ordered pair of a successor Block* and the probability of the edge to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;      // block target
  float  _prob;        // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  friend class VMStructs;
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of the loop tree is the method-level "pseudo" loop; its parent is NULL
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;       // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

 public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  float outer_loop_freq() const; // frequency of outer loop
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
  virtual bool is_loop()  { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};
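
// Illustrative arithmetic: _exit_prob is the chance that some exit is taken
// on a single iteration, so _exit_prob == 0.25f gives trip_count() ==
// 1.0f / 0.25f == 4 expected iterations; scale_freq() uses such trip counts
// (including those of outer loops) to scale member block frequencies.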


//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
  friend class VMStructs;
 private:
  Block * _from;        // Source basic block
  Block * _to;          // Destination basic block
  float _freq;          // Execution frequency (estimate)
  int   _state;
  bool  _infrequent;
  int   _from_pct;
  int   _to_pct;

  // Private accessors
  int  from_pct() const { return _from_pct; }
  int  to_pct()   const { return _to_pct;   }
  int  from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int  to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

 public:
  enum {
    open,               // initial edge state; unprocessed
    connected,          // edge used to connect two traces together
    interior            // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq), _state(open),
    _from_pct(from_pct), _to_pct(to_pct) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  float  freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int  infrequent() const { return _infrequent; }
  int state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};


//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
 private:
  uint _id;             // Unique Trace id (derived from initial block)
  Block ** _next_list;  // Array mapping index to next block
  Block ** _prev_list;  // Array mapping index to previous block
  Block * _first;       // First block in the trace
  Block * _last;        // Last block in the trace

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }
  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }

  // We've discovered a loop in this trace. Reset last to be "b", and first to
  // the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, NULL);
    set_next(_last, NULL);
  }

 public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _id(b->_pre_order),
    _next_list(next_list),
    _prev_list(prev_list),
    _first(b),
    _last(b) {
    set_next(b, NULL);
    set_prev(b, NULL);
  }

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != NULL) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != NULL, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }

  // Adjust the blocks in this trace
  void fixup_blocks(PhaseCFG &cfg);
  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};
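
// Linkage sketch (illustrative): a Trace threads its blocks through the
// shared next/prev arrays, indexed by each block's _pre_order number.
// Appending a block b2 to a trace currently ending in b1 amounts to:
//
//   set_next(b1, b2);   // _next_list[b1->_pre_order] = b2
//   set_prev(b2, b1);   // _prev_list[b2->_pre_order] = b1
//   _last = b2;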

//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their frequencies
class PhaseBlockLayout : public Phase {
  friend class VMStructs;
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
 public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
};
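
// Pass sketch (illustrative): the constructor runs the steps declared above
// in order, roughly:
//
//   find_edges();            // collect CFG edges with their frequencies
//   grow_traces();           // extend traces along the most frequent edges
//   merge_traces(true);      // stitch traces at fall-through points, then
//                            // again allowing loose connections
//   reorder_traces(count);   // lay out the surviving traces by frequency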

#endif // SHARE_VM_OPTO_BLOCK_HPP