memnode.hpp revision 1472:c18cbe5936b8
/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
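  // Example: given a MemNode* 'mem' (an illustrative local), the fixed input
  // slots above are read with Node::in, e.g.:
  //   Node* ctl = mem->in(MemNode::Control);  // may be NULL (no control dep)
  //   Node* adr = mem->in(MemNode::Address);  // the address being accessed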
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
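
// A common fold made possible by these helpers, sketched (locals 'load' and
// 'phase' are illustrative): a load asks whether the value last stored at its
// address is still visible through its memory input and, if so, the load can
// be replaced by that value.
//   Node* mem = load->in(MemNode::Memory);
//   Node* val = load->can_see_stored_value(mem, phase);
//   if (val != NULL)  return val;   // identity: Load of a Store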

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
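
// Factory sketch (locals illustrative): make() selects the right subclass
// (here LoadINode) from the BasicType argument, and the caller then runs it
// through GVN.
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
//   ld = gvn.transform(ld);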

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadUI2LNode-----------------------------------
// Load an unsigned integer into long from memory
class LoadUI2LNode : public LoadNode {
public:
  LoadUI2LNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeLong* t = TypeLong::UINT)
    : LoadNode(c, mem, adr, at, t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
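
// Factory sketch for stores, matching the load example above (locals
// illustrative): make() picks the subclass (here StoreINode) from the
// BasicType, and the transformed node is the new memory state.
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
//   Node* new_mem = gvn.transform(st);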

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c,mem,adr,at,val,oop_store),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           (_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a long from memory (either object or array).
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {}
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
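
// Construction sketch (locals illustrative; allocation idiom assumed to match
// the rest of this code base): per the ExpectedIn layout above, a CAS node
// takes control, memory, address, new value, and the expected old value, and
// its TypeInt::BOOL result reports success.
//   Node* cas = new (C, 5) CompareAndSwapINode(ctl, mem, adr, newval, oldval);
//   Node* ok  = gvn.transform(cas);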

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return the allocation's input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
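
// A sketch of the constant-bounds overload (locals illustrative): zeroing an
// allocated body from the end of the object header up to a known end offset,
// yielding the new memory state.
//   Node* new_mem = ClearArrayNode::clear_memory(ctl, mem, dest,
//                                                header_size_in_bytes,
//                                                end_offset, &gvn);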

//------------------------------StrComp-------------------------------------
class StrCompNode: public Node {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1,
              Node* s2, Node* c2): Node(control, char_array_mem,
                                        s1, c1,
                                        s2, c2) {}
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public Node {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c): Node(control, char_array_mem,
                                                   s1, s2, c) {}
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public Node {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1,
                 Node* s2, Node* c2): Node(control, char_array_mem,
                                           s1, c1,
                                           s2, c2) {}
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public Node {
public:
  AryEqNode(Node* control, Node* char_array_mem,
            Node* s1, Node* s2): Node(control, char_array_mem, s1, s2) {}
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Releases: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow each volatile-store with a MemBar-Volatile to
// separate it from any following volatile-load.
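// Schematically, for a Java volatile field this yields (a sketch of the
// ordering rules above, not literal node-creation code):
//   volatile load:   LoadX;           MemBarAcquire
//   volatile store:  MemBarRelease;   StoreX;   MemBarVolatile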
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
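
// Factory sketch (locals illustrative): a wide acquire barrier with no
// precedence edge; the opcode selects the concrete subclass below.
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);
//   Node* membar = gvn.transform(mb);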

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load or FastLock.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store or FastUnlock.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility.
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
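
// Capture sketch (locals illustrative): a store that provably initializes
// freshly allocated memory can be folded into the InitializeNode.
//   intptr_t offset = init->can_capture_store(st, phase);
//   if (offset != 0) {
//     Node* captured = init->capture_store(st, offset, phase);
//     // 'captured' writes init's raw memory; NULL means the capture failed
//   }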

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
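
// Accessor sketch (locals illustrative): reading and updating one slice of a
// wide memory state by alias index.
//   MergeMemNode* mms = MergeMemNode::make(C, base_mem);
//   Node* slice = mms->memory_at(alias_idx);   // falls back to base memory
//   mms->set_memory_at(alias_idx, new_slice);  // overwrite just that slice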

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

1097  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
1098  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
1099

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
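
// A fuller version of the expected usage above (a sketch; 'mem' and
// 'new_slice_for' are illustrative): visit every non-empty slice and replace
// it through the stream so the iterator's bookkeeping stays in sync.
//   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
//     Node* old_slice = mms.memory();
//     mms.set_memory(new_slice_for(mms.alias_idx(), old_slice));
//   }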

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};
1249