memnode.hpp revision 3718:b9a9ed0f8eeb
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actual address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
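  // (Here "-1" refers to NodeSentinel, the (Node*)-1 value from node.hpp,
  // returned to tell the calling Ideal() to return NULL immediately
  // instead of continuing to transform.)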

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits, signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits, unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits, unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits, signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadUI2LNode-----------------------------------
// Load an unsigned integer into long from memory
class LoadUI2LNode : public LoadNode {
public:
  LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
    : LoadNode(c, mem, adr, at, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?
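  // Note: JLS 17.7 allows a non-volatile long to be read as two 32-bit
  // halves, but a volatile long must be read atomically, so on 32-bit
  // platforms this flag forbids splitting the load.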

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
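  // Note: same JLS 17.7 constraint as on LoadLNode; a volatile long store
  // must not be split into two 32-bit stores on 32-bit platforms.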

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c,mem,adr,at,val,oop_store),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type(); }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
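  // A LoadStore node both reads and writes memory at its address; the value
  // result feeds its users directly, while the updated memory state is
  // extracted through an SCMemProjNode projection (see above).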
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,  // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1  // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
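  // Typical use, as a sketch (with illustrative names):
  //   MergeMemNode* mm = MergeMemNode::make(C, mem);
  //   mm->set_memory_at(alias_idx, new_slice);
  // The result behaves like 'mem' except for the one updated slice.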
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
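  // (That is, re-read the slice at the current index directly from the
  // node, picking up a change made to the MergeMem behind the stream's back.)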
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault; TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP