/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style


class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class Block_Array;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class Matcher;
class MemBarNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorSet;
class IfTrueNode;
class IfFalseNode;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;


#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.

class Node {
  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
  #if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
  #endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  // New Operator that takes a Compile pointer, this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // New Operator that takes a Compile pointer, this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C, int y) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
    n->_in = (Node**)(((char*)n) + x);
#ifdef ASSERT
    n->_in[y-1] = n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node.  Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
            Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

 public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bitvectors.
  // It is declared const to defend against inadvertent assignment,
  // since it is used by clients as a naked field.
  const node_idx_t _idx;

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node*    last_out(DUIterator_Last i) const  { return *i; }
#endif

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.  We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void  raw_del_out(uint i) {
    assert(i < _outcnt,"oob");
    assert(_outcnt > 0,"oob");
    #if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
    #endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
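
  // A sketch of typical usage (FooNode and the operand names are
  // illustrative only):
  //   Node* f = new (C, 3) FooNode(NULL, left, right); // ctor calls init_req
  //   f->set_req(2, other);  // later rewiring keeps def-use info in sync
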
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int replace_edge(Node* old, Node* neww);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast() const;

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
    #if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
    #endif
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
  void set_prec( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i >= _cnt, "not a precedence edge");
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }
  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponding to the node class and all its
  // super classes so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32

  #define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,    MemBar, 0)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
      DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
      DEFINE_CLASS_ID(MachIf,        Mach, 3)
      DEFINE_CLASS_ID(MachTemp,      Mach, 4)

    DEFINE_CLASS_ID(Proj,  Node, 2)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)

    DEFINE_CLASS_ID(Region, Node, 3)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 4)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(Type,  Node, 5)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)

    DEFINE_CLASS_ID(Mem,   Node, 6)
      DEFINE_CLASS_ID(Load,  Mem, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(MergeMem, Node, 7)
    DEFINE_CLASS_ID(Bool,     Node, 8)
    DEFINE_CLASS_ID(AddP,     Node, 9)
    DEFINE_CLASS_ID(BoxLock,  Node, 10)
    DEFINE_CLASS_ID(Add,      Node, 11)
    DEFINE_CLASS_ID(Mul,      Node, 12)

    _max_classes  = ClassMask_Mul
  };
  #undef DEFINE_CLASS_ID
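
  // Worked example of the encoding (values follow from DEFINE_CLASS_ID):
  //   Bit_Multi     = 1 << 0             = 1,  Class_Multi     = 1
  //   Bit_SafePoint = Bit_Multi << 1     = 2,  Class_SafePoint = 3
  //   Bit_Call      = Bit_SafePoint << 1 = 4,  Class_Call      = 7
  // so a CallNode (class id 7) also answers is_SafePoint(), since
  //   (7 & ClassMask_SafePoint) == (7 & 3) == 3 == Class_SafePoint.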

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy             = 0x01, // should be first bit to avoid shift
    Flag_is_Call             = Flag_is_Copy << 1,
    Flag_rematerialize       = Flag_is_Call << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro            = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con              = Flag_is_macro << 1,
    Flag_is_cisc_alternate   = Flag_is_Con << 1,
    Flag_is_Branch           = Flag_is_cisc_alternate << 1,
    Flag_is_block_start      = Flag_is_Branch << 1,
    Flag_is_Goto             = Flag_is_block_start << 1,
    Flag_is_dead_loop_safe   = Flag_is_Goto << 1,
    Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
    Flag_is_safepoint_node   = Flag_may_be_short_branch << 1,
    Flag_is_pc_relative      = Flag_is_safepoint_node << 1,
    Flag_is_Vector           = Flag_is_pc_relative << 1,
    _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties

  // Special case: is_Call() returns true for both CallNode and MachCallNode.
  bool is_Call() const {
    return (_flags & Flag_is_Call) != 0;
  }

  CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
    assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
    return (CallNode*)this;
  }

  #define DEFINE_CLASS_QUERY(type) \
  bool is_##type() const { \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  } \
  type##Node *as_##type() const { \
    assert(is_##type(), "invalid node class"); \
    return (type##Node*)this; \
  }
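
  // For example, DEFINE_CLASS_QUERY(Phi) expands (modulo layout) to:
  //   bool is_Phi() const { return ((_class_id & ClassMask_Phi) == Class_Phi); }
  //   PhiNode *as_Phi() const { assert(is_Phi(), "invalid node class");
  //                             return (PhiNode*)this; }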

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
  // A data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || is_Proj() ||
           (_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0;
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }

  // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
  bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return (_flags & Flag_is_block_start) != 0;
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }

  // Value is a vector of primitive values
  bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted-loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one. Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;
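
  // A sketch of use (n is any node; names illustrative):
  //   jint con = n->find_int_con(1);  // the constant, or 1 if n is not a constant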

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jlong value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return the operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
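
  // A sketch of a walk with NFunc callbacks (count_pre and the counter are
  // illustrative):
  //   static void count_pre(Node& n, void* env) { ++*(uint*)env; }
  //   uint count = 0;
  //   root->walk(count_pre, Node::nop, &count);  // each node visited once
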
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const;                 // Print this node,
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req() const;     // Print required-edge info
  virtual void dump_prec() const;    // Print precedence-edge info
  virtual void dump_out() const;     // Print the output edge info
  virtual void dump_spec(outputStream *st) const {} // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static int _in_dump_cnt;
  static bool in_dump() { return _in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig

  int        _hash_lock;               // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

  #if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens.
  #endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x)     I_VDUI_ONLY(*this, x)

// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which is applied
// to a repeatedly reloaded base pointer x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
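//
// A sketch of deletion during iteration (should_drop and the replacement
// value z are illustrative):
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (should_drop(y)) {
//       y->replace_edge(x, z);   // removes every copy of the x->y edge
//       --i;                     // back up over the deletion
//     }
//   }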
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }


// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times:  "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;           VDUI_ONLY(verify_relimit(n));  }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}


// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1));  }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n));  }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};
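
// A sketch of use as an old-index -> new-node map (the surrounding names
// are illustrative):
//   Node_Array map(C->comp_arena());
//   map.map(old_n->_idx, new_n);
//   Node* nn = map[old_n->_idx];   // NULL if old_n was never mapped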

class Node_List : public Node_Array {
  uint _cnt;
public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
};

//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index++, Node_List::pop());
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void  clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
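
// A sketch of a transitive-user traversal built on Unique_Node_List
// (illustrative):
//   Unique_Node_List worklist;
//   worklist.push(root);
//   for (uint k = 0; k < worklist.size(); k++) {
//     Node* n = worklist.at(k);
//     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++)
//       worklist.push(n->fast_out(i));  // push() filters duplicates
//   }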

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes,  sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage
};
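
// A sketch of an iterative depth-first input walk using Node_Stack
// (illustrative; a real walk would also track visited nodes):
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);
//       Node* m = n->in(i);
//       if (m != NULL)  stack.push(m, 0);  // descend into the input
//     } else {
//       stack.pop();                       // all inputs processed
//     }
//   }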


//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes.  See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) {        _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};
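
// A sketch of carrying notes across a node replacement (illustrative; it
// assumes Compile::node_notes_at, referenced above, returns the notes
// pointer for an index):
//   Node_Notes* src = C->node_notes_at(old_n->_idx);
//   if (src != NULL)  C->set_node_notes_at(new_n->_idx, src);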

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
