node.hpp revision 196:d1605aabd0a1
1/* 2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

// Forward declarations for the C2 ideal-graph node hierarchy and the
// phases/utilities that operate on it (declared in their own headers).
class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class Block_Array;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNNode;
class EncodePNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class Matcher;
class MemBarNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorSet;
class IfTrueNode;
class IfFalseNode;
// Generic per-node callback, e.g. for graph walks.
typedef void (*NFunc)(Node&,void*);
extern "C" {
  // qsort(3)-compatible comparator type.
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;


// OPTO_DU_ITERATOR_ASSERT == 1 selects checked def-use iterator classes
// (debug builds); == 0 selects raw index/pointer typedefs (product builds).
#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.
class Node {
  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
  #if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
  #endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" for
  // which these Nodes are just a part of.

  // New Operator that takes a Compile pointer, this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n;   // magic cookie for assertion check
#endif
    // _out temporarily caches the Compile*; see the arena comment above.
    n->_out = (Node**)C;
    return (void*)n;
  }

  // New Operator that takes a Compile pointer, this will eventually
  // be the "new" New operator.  The extra 'y' slots for the _in edge
  // array are allocated contiguously after the node body itself.
  inline void* operator new( size_t x, Compile* C, int y) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
    n->_in = (Node**)(((char*)n) + x);
#ifdef ASSERT
    n->_in[y-1] = n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP (storage lives in the arena and is reclaimed wholesale)
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node.  Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.
  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

 public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bitvectors.
  // It is declared const to defend against inadvertent assignment,
  // since it is used by clients as a naked field.
  const node_idx_t _idx;

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Checked-iterator declarations (debug builds).
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Unchecked implementations (product builds); must mirror the checked API.
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node*    last_out(DUIterator_Last i) const  { return *i; }
#endif

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.
  // We are going to try to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void  raw_del_out(uint i) {
    assert(i < _outcnt,"oob");
    assert(_outcnt > 0,"oob");
    #if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
    #endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
// A node argument is acceptable if it is NULL, iterative-GVN verification
// is off, or the node is not actually dead.
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n );               // Append a NEW required input
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx );              // Delete required edge & compact
  void ins_req( uint i, Node *n );       // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  // Asserts the slot is still empty, so no old edge needs unhooking.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int replace_edge(Node* old, Node* neww);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast() const;

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
    #if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
    #endif
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node) {
    replace_by(new_node);
    disconnect_inputs(NULL);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
  void set_prec( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i >= _cnt, "not a precedence edge");
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }
  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    // Cast away the const on _idx; this is the one sanctioned mutation point.
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponding to the node class and all its
  // super classes so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bit
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  // Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  // Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32

  #define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  // Hierarchical class-id values; see the bit-layout comment above.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,    MemBar, 0)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
      DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
      DEFINE_CLASS_ID(MachIf,        Mach, 3)
      DEFINE_CLASS_ID(MachTemp,      Mach, 4)

    DEFINE_CLASS_ID(Proj,  Node, 2)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)

    DEFINE_CLASS_ID(Region, Node, 3)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 4)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(Type,  Node, 5)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeN, Type, 5)
      DEFINE_CLASS_ID(EncodeP, Type, 6)

    DEFINE_CLASS_ID(Mem,   Node, 6)
      DEFINE_CLASS_ID(Load,  Mem, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(MergeMem, Node, 7)
    DEFINE_CLASS_ID(Bool,     Node, 8)
    DEFINE_CLASS_ID(AddP,     Node, 9)
    DEFINE_CLASS_ID(BoxLock,  Node, 10)
    DEFINE_CLASS_ID(Add,      Node, 11)
    DEFINE_CLASS_ID(Mul,      Node, 12)

    _max_classes = ClassMask_Mul
  };
  #undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  // Boolean node properties, packed into _flags (16 bits available).
  enum NodeFlags {
    Flag_is_Copy             = 0x01, // should be first bit to avoid shift
    Flag_is_Call             = Flag_is_Copy << 1,
    Flag_rematerialize       = Flag_is_Call << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro            = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con              = Flag_is_macro << 1,
    Flag_is_cisc_alternate   = Flag_is_Con << 1,
    Flag_is_Branch           = Flag_is_cisc_alternate << 1,
    Flag_is_block_start      = Flag_is_Branch << 1,
    Flag_is_Goto             = Flag_is_block_start << 1,
    Flag_is_dead_loop_safe   = Flag_is_Goto << 1,
    Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
    Flag_is_safepoint_node   = Flag_may_be_short_branch << 1,
    Flag_is_pc_relative      = Flag_is_safepoint_node << 1,
    Flag_is_Vector           = Flag_is_pc_relative << 1,
    _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id; // hierarchical class id (see NodeClasses)
  jushort _flags;    // OR of NodeFlags bits

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties

  // Special case: is_Call() returns true for both CallNode and MachCallNode.
  bool is_Call() const {
    return (_flags & Flag_is_Call) != 0;
  }

  CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
    assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
    return (CallNode*)this;
  }

  // For each class 'type', generate a fast is_<type>() query and a checked
  // as_<type>() downcast, both driven by the _class_id bit hierarchy.
  #define DEFINE_CLASS_QUERY(type) \
  bool is_##type() const { \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  } \
  type##Node *as_##type() const { \
    assert(is_##type(), "invalid node class"); \
    return (type##Node*)this; \
  }

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
  // The data node which is safe to leave in dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
  bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0); // a Region starts a block iff it is its own control
    else
      return (_flags & Flag_is_block_start) != 0;
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }

  // Value is a vector of primitive values
  bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes.  Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains.  Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a Counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; Otherwise return NULL;
  Node* find_similar(int opc);

  // Return the unique control out if only one.  Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
896 bool rematerialize() const; 897 898 // Return JVM State Object if this Node carries debug info, or NULL otherwise 899 virtual JVMState* jvms() const; 900 901 // Print as assembly 902 virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; 903 // Emit bytes starting at parameter 'ptr' 904 // Bump 'ptr' by the number of output bytes 905 virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; 906 // Size of instruction in bytes 907 virtual uint size(PhaseRegAlloc *ra_) const; 908 909 // Convenience function to extract an integer constant from a node. 910 // If it is not an integer constant (either Con, CastII, or Mach), 911 // return value_if_unknown. 912 jint find_int_con(jint value_if_unknown) const { 913 const TypeInt* t = find_int_type(); 914 return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; 915 } 916 // Return the constant, knowing it is an integer constant already 917 jint get_int() const { 918 const TypeInt* t = find_int_type(); 919 guarantee(t != NULL, "must be con"); 920 return t->get_con(); 921 } 922 // Here's where the work is done. Can produce non-constant int types too. 923 const TypeInt* find_int_type() const; 924 925 // Same thing for long (and intptr_t, via type.hpp): 926 jlong get_long() const { 927 const TypeLong* t = find_long_type(); 928 guarantee(t != NULL, "must be con"); 929 return t->get_con(); 930 } 931 jlong find_long_con(jint value_if_unknown) const { 932 const TypeLong* t = find_long_type(); 933 return (t != NULL && t->is_con()) ? 
t->get_con() : value_if_unknown; 934 } 935 const TypeLong* find_long_type() const; 936 937 // These guys are called by code generated by ADLC: 938 intptr_t get_ptr() const; 939 intptr_t get_narrowcon() const; 940 jdouble getd() const; 941 jfloat getf() const; 942 943 // Nodes which are pinned into basic blocks 944 virtual bool pinned() const { return false; } 945 946 // Nodes which use memory without consuming it, hence need antidependences 947 // More specifically, needs_anti_dependence_check returns true iff the node 948 // (a) does a load, and (b) does not perform a store (except perhaps to a 949 // stack slot or some other unaliased location). 950 bool needs_anti_dependence_check() const; 951 952 // Return which operand this instruction may cisc-spill. In other words, 953 // return operand position that can convert from reg to memory access 954 virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } 955 bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } 956 957//----------------- Graph walking 958public: 959 // Walk and apply member functions recursively. 960 // Supplied (this) pointer is root. 961 void walk(NFunc pre, NFunc post, void *env); 962 static void nop(Node &, void*); // Dummy empty function 963 static void packregion( Node &n, void* ); 964private: 965 void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); 966 967//----------------- Printing, etc 968public: 969#ifndef PRODUCT 970 Node* find(int idx) const; // Search the graph for the given idx. 971 Node* find_ctrl(int idx) const; // Search control ancestors for the given idx. 
  void dump() const;                // Print this node,
  void dump(int depth) const;       // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;  // Print control nodes, to depth d
  virtual void dump_req() const;    // Print required-edge info
  virtual void dump_prec() const;   // Print precedence-edge info
  virtual void dump_out() const;    // Print the output edge info
  virtual void dump_spec(outputStream *st) const {}; // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;              // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  // Nonzero while a dump is in progress; queried via in_dump() so other
  // printing code can avoid re-entrant dumping.
  static int _in_dump_cnt;
  static bool in_dump() { return _in_dump_cnt > 0; }
  // One-line dump: node index, class Name, and indices of all inputs.
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                // Unique value assigned to every node.
  int   debug_idx() const        { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;              // Original version of this, if any.
  Node* debug_orig() const       { return _debug_orig; }
  void  set_debug_orig(Node* orig);  // _debug_orig = orig

  // Barrier to modifications of nodes in the hash table: a node must not be
  // mutated (in a way that changes its hash) while _hash_lock is nonzero.
  int  _hash_lock;
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void  exit_hash_lock()  { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

  #if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;          // The last deleted node.
  uint        _del_tick;          // Bumped when a deletion happens..
  #endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;             // cached value of VerifyDUIterators
  const Node*  _node;             // the node containing the _out array
  uint         _outcnt;           // cached node->_outcnt
  uint         _del_tick;         // cached node->_del_tick
  Node*        _last;             // last value produced by the iterator

  void sample(const Node* node);  // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to a unsigned integer index, which indexes
// onto a repeatedly reloaded base pointer of x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;     // Records the refresh activity.

  void sample(const Node* node);  // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();        // Verify an increment operation.
  void verify_resync();           // Verify that we can back up over a deletion.
  void verify_finish();           // Verify that the loop terminated properly.
  void refresh();                 // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }


// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}


// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};

// Growable list of Nodes on top of Node_Array; _cnt tracks the used prefix.
class Node_List : public Node_Array {
  uint _cnt;
public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  // Remove from the front: last element is swapped into slot 0 (order not preserved).
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
};

//------------------------------Unique_Node_List-------------------------------
// Node_List with set semantics: membership tracked by _idx in a VectorSet,
// so each node appears at most once.  pop() cycles through via _clock_index.
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index++, Node_List::pop());
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
// Stack of (node, input-index) pairs, used for iterative graph traversals.
class Node_Stack {
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ?
                 size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes,  sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage
};


//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes.  See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) {        _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_nodes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    // NOTE(review): presumably grow_node_notes extends the array backing
    // _node_note_array so arr->at below is in range -- confirm in compile.cpp.
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};