callnode.hpp revision 0:a61af66fc99e
/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class  SafePointNode;
class   CallNode;
class     CallJavaNode;
class       CallStaticJavaNode;
class       CallDynamicJavaNode;
class     CallRuntimeNode;
class       CallLeafNode;
class         CallLeafNoFPNode;
class     AllocateNode;
class     AllocateArrayNode;
class     LockNode;
class     UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_flags(Flag_is_block_start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {}
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
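//
// For example, if bar() has been inlined into foo() at bci 7, a call site
// inside bar() carries a two-element chain: the innermost JVMState describes
// bar()'s locals, expression stack and monitors, and its caller() link
// describes foo() suspended at bci 7 (of_depth(1) is foo(), of_depth(2) is bar()).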
class JVMState : public ResourceObj {
private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size);  // root state; has a null method

  // Access functions for the JVM
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return _stkoff - _locoff; }
  int            stk_size() const { return _monoff - _stkoff; }
  int            mon_size() const { return _endoff - _monoff; }

  bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
  bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
  bool        is_mon(uint i) const { return i >= _monoff && i < _endoff; }

  uint              sp()     const { return _sp; }
  int               bci()    const { return _bci; }
  bool          has_method() const { return _method != NULL; }
  ciMethod*         method() const { assert(has_method(), ""); return _method; }
  JVMState*         caller() const { return _caller; }
  SafePointNode*    map()    const { return _map; }
  uint              depth()  const { return _depth; }
  uint        debug_start()  const; // returns locoff of root caller
  uint        debug_end()    const; // returns endoff of self
  uint        debug_size()   const { return loc_size() + sp() + mon_size(); }
  uint        debug_depth()  const; // returns sum of debug_size values at all depths
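  // (Example: a frame with 3 locals, sp() == 2 and one monitor, i.e.
  //  mon_size() == 2 edges, has debug_size() == 3 + 2 + 2 == 7.)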

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
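  // Layout example: with logMonitorEdges == 1 each monitor occupies two input
  // edges, so for monoff() == 10, monitor 0 uses edges 10 (box) and 11 (obj),
  // monitor 1 uses edges 12 (box) and 13 (obj); a mon_size() of 4 therefore
  // corresponds to nof_monitors() == 2.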

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) { _locoff = _stkoff = _monoff = _endoff = off; }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  void              set_bci(int bci) { _bci = bci; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const;       // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
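  // For example, a plain SafePointNode leaves _adr_type NULL (no memory
  // effects advertised), while the CallStaticJavaNode constructor for runtime
  // stubs below installs a narrower adr_type describing just the memory the
  // stub is known to affect.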

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called
  PointsToNode::EscapeState _escape_state;

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
    init_flags(Flag_is_Call);
    _escape_state = PointsToNode::UnknownEscape;
  }

  const TypeFunc* tf()        const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float   cnt()         const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void        clone_jvms() { }   // default is not to clone

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void        dump_req()  const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool    _optimized_virtual;
  ciMethod* _method;            // Method being direct called
public:
  const int       _bci;         // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci), _optimized_virtual(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                { return _method; }
  void  set_method(ciMethod *m)           { _method = m; }
  void  set_optimized_virtual(bool f)     { _optimized_virtual = f; }
  bool  is_optimized_virtual() const      { return _optimized_virtual; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name;            // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool        guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
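  // Usage sketch (variable names illustrative): given a pointer value and the
  // current phase, recover the defining allocation and its klass input:
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase);
  //   Node*         klass = (alloc == NULL) ? NULL : alloc->in(KlassNode);
  // or, when 'ptr' may carry a constant offset:
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);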

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength,        count_val);
  }
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }

  // Dig the length operand out of a (possible) array allocation site.
  static Node* Ideal_length(Node* ptr, PhaseTransform* phase) {
    AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength);
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  bool _eliminate;    // indicates this lock can be safely eliminated
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);


public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _eliminate(false)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *   obj_node() const       {return in(TypeFunc::Parms + 0); }
  Node *   box_node() const       {return in(TypeFunc::Parms + 1); }
  Node *   fastlock_node() const  {return in(TypeFunc::Parms + 2); }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()         {return _eliminate; }
  // mark node as eliminated and update the counter if there is one
  void set_eliminated();

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
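// The three parameters are reached through the obj_node(), box_node() and
// fastlock_node() accessors inherited from AbstractLockNode, and lock_type()
// below builds the matching three-field TypeFunc domain.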
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
};