// memnode.hpp revision 1879:f95d63e2154a
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
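
// Editor's note, a minimal illustrative sketch (not part of the original
// header): the input enum above fixes the meaning of a MemNode's edges, so
// optimizer code reads them by name rather than by raw index.  Here 'n' is
// assumed to be some MemNode supplied by the caller:
//
//   Node* ctl = n->in(MemNode::Control);   // NULL if the access can float
//   Node* mem = n->in(MemNode::Memory);    // memory state being read/written
//   Node* adr = n->in(MemNode::Address);   // address of the access
//
// Stores additionally define in(MemNode::ValueIn); only StoreCM defines
// in(MemNode::OopStore).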

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
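
// Editor's sketch of expected usage, assuming a caller (e.g. parse or
// intrinsic code) that already holds 'gvn', 'ctl', 'mem', 'adr' and the
// address type 'adr_type'; an int load would be created through the
// polymorphic factory above and then GVN-transformed:
//
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
//                             TypeInt::INT, T_INT);
//   ld = gvn.transform(ld);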

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadUI2LNode-----------------------------------
// Load an unsigned integer into long from memory
class LoadUI2LNode : public LoadNode {
public:
  LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
    : LoadNode(c, mem, adr, at, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
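
// Editor's sketch: when a jlong must not be loaded piecewise (e.g. a volatile
// long on a 32-bit platform), the load is created via make_atomic above,
// which sets _require_atomic_access; 'C', 'ctl', 'mem', 'adr' and 'adr_type'
// are assumed from the caller's context:
//
//   LoadLNode* ld = LoadLNode::make_atomic(C, ctl, mem, adr,
//                                          adr_type, TypeLong::LONG);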

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
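
// Editor's sketch, mirroring the LoadNode factory: the transformed store is
// itself the new memory state for its alias slice (its bottom_type() is
// Type::MEMORY).  'gvn', 'ctl', 'mem', 'adr', 'adr_type' and 'val' are
// assumed from the caller:
//
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
//   Node* new_mem = gvn.transform(st);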

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c,mem,adr,at,val,oop_store),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a long from memory.
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
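
// Editor's sketch of the compare-and-swap idiom: the expected old value rides
// on the extra ExpectedIn edge, the node yields a boolean (TypeInt::BOOL),
// and the memory effect is exposed through an SCMemProjNode.  The
// 'new (C, n)' placement-new is this code base's node-allocation idiom;
// 'ctl', 'mem', 'adr', 'newval' and 'oldval' are assumed from the caller:
//
//   Node* cas  = gvn.transform(new (C, 5) CompareAndSwapPNode(ctl, mem, adr,
//                                                             newval, oldval));
//   Node* proj = gvn.transform(new (C, 1) SCMemProjNode(cas));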

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return the allocation's input memory edge if it is a different instance,
  // or the node itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
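
// Editor's sketch: allocation expansion zeroes the new object's body through
// the static helpers above; with constant offsets (assumed supplied by the
// caller, suitably aligned as documented) the call shape is:
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, rawptr,
//                                      header_size,    // mod BytesPerInt
//                                      end_offset,     // mod BytesPerLong
//                                      &gvn);
//
// and the return value is the new raw-memory state covering the cleared range.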

//------------------------------StrComp-------------------------------------
class StrCompNode: public Node {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1,
              Node* s2, Node* c2): Node(control, char_array_mem,
                                        s1, c1,
                                        s2, c2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public Node {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c): Node(control, char_array_mem,
                                                   s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public Node {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1,
                 Node* s2, Node* c2): Node(control, char_array_mem,
                                           s1, c1,
                                           s2, c2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public Node {
public:
  AryEqNode(Node* control, Node* char_array_mem,
            Node* s1, Node* s2): Node(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
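
// Editor's sketch: barriers are built through the factory above.  A "wide"
// membar serializes all of memory (the AliasIdxBot default); passing a
// specific alias_idx narrows it to one slice.  For example, the acquire
// barrier after a volatile load (opcode name taken from the subclass below):
//
//   Node* mb = gvn.transform(MemBarNode::make(C, Op_MemBarAcquire));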

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load or FastLock.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
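
// Editor's sketch of the store-capture protocol declared above: a store that
// provably initializes part of the new object can be folded into the
// InitializeNode's raw-memory stores.  'init', 'st' and 'phase' are assumed
// from the caller:
//
//   intptr_t offset = init->can_capture_store(st, phase);
//   if (offset != 0) {                // 0 means "leave the store where it is"
//     Node* captured = init->capture_store(st, offset, phase);
//   }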

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
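
// Editor's sketch: a MergeMem is the standard way to update one alias slice
// of a wide memory state while leaving all other slices untouched.  Slices
// never set explicitly fall through to base_memory().  'C', 'gvn',
// 'all_mem', 'alias_idx' and 'new_slice' are assumed from the caller:
//
//   MergeMemNode* merged = MergeMemNode::make(C, all_mem);
//   merged->set_memory_at(alias_idx, new_slice);
//   all_mem = gvn.transform(merged);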

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
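
// Editor's sketch expanding the "expected usages" comment above into a full
// loop over every explicit, non-empty slice of a MergeMem ('mem' is assumed
// to be a Node* known to be a MergeMem):
//
//   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
//     const TypePtr* tp = mms.adr_type();  // address type of this alias slice
//     Node* slice       = mms.memory();    // current memory for the slice
//     // ... inspect 'slice', or replace it via mms.set_memory(new_slice) ...
//   }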

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP