/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access; // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: e.g., a byte read from an integer array
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release,       // Store has to release or be preceded by MemBarRelease.
                 seqcst,        // LoadStore has to have both acquire and release semantics.
                 unset          // The memory ordering is not set (used for testing)
  } MemOrd;
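
  // Illustrative sketch (not part of this header; 'is_volatile' is an
  // assumed local): a caller building a field load would typically pick
  // the ordering from the field's volatility:
  //
  //   MemNode::MemOrd mo = is_volatile ? MemNode::acquire
  //                                    : MemNode::unordered;
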
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return NULL;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether this load depends only on the dominating test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters can easily
  // go unnoticed. With an enum, the compiler checks that the type of
  // the argument matches the type of the parameter.
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
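
  // Illustrative sketch (not part of this header; 'gvn', 'ctl', 'mem',
  // 'adr' and 'adr_type' are assumed locals): at a call site with several
  // defaulted booleans, the enum makes the intent explicit and lets the
  // compiler reject a misplaced argument:
  //
  //   LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT,
  //                  MemNode::unordered, LoadNode::Pinned);
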
private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: if the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode;
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode with the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // between loads that can be reordered and those that require acquire
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool unaligned = false, bool mismatched = false);
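
  // Illustrative sketch (not part of this header; locals assumed as
  // above): an unsafe access that may be misaligned or type-mismatched
  // passes the trailing flags explicitly:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeLong::LONG, T_LONG, MemNode::unordered,
  //                             LoadNode::DependsOnlyOnTest,
  //                             true /* unaligned */, true /* mismatched */);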

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits, signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits, unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits, unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits, signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // between stores that can be reordered and those that require release
  // semantics to adhere to the Java specification.  The required behaviour
  // is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
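
  // Illustrative sketch (not part of this header; 'gvn', 'ctl', 'mem',
  // 'adr', 'adr_type' and 'val' are assumed locals): a caller storing a
  // possibly escaping oop conservatively derives the ordering from the
  // basic type:
  //
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val,
  //                                   T_OBJECT,
  //                                   StoreNode::release_if_reference(T_OBJECT));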

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the stored value been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store.
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           (_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-lock a pointer from memory (either object or array).
// On SPARC & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {}
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
     init_req(ExpectedIn, ex);
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};
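
// Illustrative sketch (not part of this header; 'ctl', 'mem', 'adr',
// 'newval' and 'oldval' are assumed locals): an intrinsic expansion
// would construct one of the concrete subclasses below directly, with
// the expected old value wired into the extra ExpectedIn slot:
//
//   Node* cas = new CompareAndSwapINode(ctl, mem, adr, newval, oldval,
//                                       MemNode::seqcst);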

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
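
  // Illustrative sketch (not part of this header; 'ctl', 'rawmem',
  // 'object', 'header_size', 'end_offset' and 'gvn' are assumed locals):
  // zeroing the body of a new object between two constant offsets, both
  // suitably aligned per the rules above:
  //
  //   rawmem = ClearArrayNode::clear_memory(ctl, rawmem, object,
  //                                         header_size, end_offset, &gvn);
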
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const;                   // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;     // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
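
  // Illustrative sketch (not part of this header; 'C' is the current
  // Compile): a caller typically requests a barrier flavor by opcode,
  // defaulting to a wide (bottom) memory slice:
  //
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);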
1162};
1163
1164// "Acquire" - no following ref can move before (but earlier refs can
1165// follow, like an early Load stalled in cache).  Requires multi-cpu
1166// visibility.  Inserted after a volatile load.
1167class MemBarAcquireNode: public MemBarNode {
1168public:
1169  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
1170    : MemBarNode(C, alias_idx, precedent) {}
1171  virtual int Opcode() const;
1172};
1173
1174// "Acquire" - no following ref can move before (but earlier refs can
1175// follow, like an early Load stalled in cache).  Requires multi-cpu
1176// visibility.  Inserted independ of any load, as required
1177// for intrinsic Unsafe.loadFence().
1178class LoadFenceNode: public MemBarNode {
1179public:
1180  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
1181    : MemBarNode(C, alias_idx, precedent) {}
1182  virtual int Opcode() const;
1183};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
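
// For reference, a sketch of the intended mapping (the intrinsics are
// expanded in library_call.cpp):
//   Unsafe.loadFence()  -> LoadFenceNode      (acquire, tied to no load)
//   Unsafe.storeFence() -> StoreFenceNode     (release, tied to no store)
//   Unsafe.fullFence()  -> MemBarVolatileNode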

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

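// Illustrative monitor mapping (a sketch; the actual graph is built when
// synchronized regions are parsed):
//
//   synchronized (o) {     =>  FastLock(o); MemBarAcquireLock
//     ...
//   }                      =>  MemBarReleaseLock; FastUnlock(o)

// MemBarStoreStore - orders earlier stores before later stores.  Used, for
// example, after the initializing stores of a new object, so that they
// cannot be reordered with the store that publishes the object reference
// (cf. InitializeNode below).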
class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

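// Marks the position of Thread.onSpinWait() so the backend can emit a
// spin-wait hint (e.g. PAUSE on x86) where the platform provides one.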
class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by
  // initialization of the new memory to zero, then by any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
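
// Illustrative example (a sketch) of store capture.  For
//
//   p = new Point(); p.x = 3;
//
// the parser emits the allocation's InitializeNode followed by a StoreI
// of 3 into p.x.  If can_capture_store() approves, capture_store()
// redirects that StoreI to the InitializeNode's raw memory (RawStores),
// so that complete_stores() can coalesce it with the mandatory zeroing
// when the allocation is macro-expanded.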

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const       { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
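
// A minimal usage sketch (the locals are illustrative):
//
//   MergeMemNode* mm = MergeMemNode::make(all_memory_state);
//   uint aidx = C->get_alias_index(adr_type);
//   Node* slice = mm->memory_at(aidx);    // read one slice (or the base)
//   mm->set_memory_at(aidx, new_store);   // update only that slice
//
// Slices never touched by set_memory_at() report the base memory, which
// keeps a MergeMem sparse: it records only the slices that differ.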

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
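
// A sketch of the parallel-iteration pattern (cf. the memory merges in
// GraphKit; the phi-making step is elided and make_phi is hypothetical):
//
//   for (MergeMemStream mms(this_mm, other_mm); mms.next_non_empty2(); ) {
//     Node* m1 = mms.force_memory();  // slice of this_mm (maybe its base)
//     Node* m2 = mms.memory2();       // corresponding slice of other_mm
//     if (m1 != m2) {
//       mms.set_memory(make_phi(region, m1, m2));
//     }
//   }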

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};
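
// Prefetch nodes are generated when an AllocateNode is macro-expanded
// (see PhaseMacroExpand::prefetch_allocation in macro.cpp): they touch
// the cache lines just past the newly bumped TLAB top, ahead of the
// next allocation.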

#endif // SHARE_VM_OPTO_MEMNODE_HPP