graphKit.hpp revision 1879:f95d63e2154a
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class Parse;
class RootNode;
//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _sp;        // JVM Expression Stack Pointer
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  SafePointNode*     map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse* is_Parse() const { return NULL; }

  ciEnv*        env()           const { return _env; }
  PhaseGVN&     gvn()           const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null()          const { return zerocon(T_OBJECT); }
  Node*         top()           const { return C->top(); }
  RootNode*     root()          const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  // Helper for byte_map_base
  Node* byte_map_base_node() {
    // Get base of card map
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by fixed constants from the StartNode enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*     map()      const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()     const { return map_not_null()->_jvms; }
  int                sp()       const { return _sp; }
  int                bci()      const { return _bci; }
  Bytecodes::Code    java_bc()  const;
  ciMethod*          method()   const { return _method; }

  void set_jvms(JVMState* jvms)       { set_map(jvms->map());
                                        assert(jvms == this->jvms(), "sanity");
                                        _sp = jvms->sp();
                                        _bci = jvms->bci();
                                        _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
  void set_sp(int i)                  { assert(i >= 0, "must be non-negative"); _sp = i; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)                  { set_sp(sp() + i); }
  void set_bci(int bci)               { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms()     const;
#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell whether an exception oop has been saved into the given map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
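
  // As an illustrative sketch (names hypothetical), addressing a field at a
  // known byte offset within an oop would use the fixed-offset form:
  //   Node* adr = basic_plus_adr(obj, offset_in_bytes);
  // The (base, ptr, offset) form serves when the address is derived from a
  // pointer other than the base oop, e.g. an already-computed element address.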

  // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C,1) IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C,1) IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C,3) SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C,3) DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C,3) OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C,3) XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C,3) URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C,4) AddPNode(b, a, o));    }
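
  // These compose naturally; e.g. a test of the form "i + 1 < len" could be
  // built with a sketch like the following (variable names are illustrative):
  //   Node* sum = AddI(i, intcon(1));
  //   Node* tst = Bool(CmpI(sum, len), BoolTest::lt);
  // The resulting BoolNode then feeds an IfNode (see create_and_map_if below).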

  // Convert between int and long, and size_t.
  // (See also macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Helper function to do a NULL pointer check or ZERO check based on type.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null, Node* *null_control);
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* do_null_check(Node* value, BasicType type) {
    return null_check_common(value, type, false, NULL);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* do_null_assert(Node* value, BasicType type) {
    return null_check_common(value, type, true, NULL);
  }
  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false);
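
  // A sketch of the two-path idiom (illustrative only):
  //   Node* null_ctl = top();
  //   Node* cast_obj = null_check_oop(obj, &null_ctl);
  //   // control() is now the not-null path; null_ctl is the null path,
  //   // or top() if the null path was diverted to an uncommon trap.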

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciProfileData* data,
                                     ciKlass* require_klass);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void push(Node* n)    { map_not_null(); _map->set_stack(_map->_jvms,_sp++,n); }
  Node* pop()           { map_not_null(); return _map->stack(_map->_jvms,--_sp); }
  Node* peek(int off=0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }
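
  // Thus a two-word value moves through the expression stack as a sketch like
  // (illustrative only):
  //   push_pair(lval);          // pushes lval, then a top() placeholder
  //   ...
  //   Node* lval2 = pop_pair(); // pops the placeholder, then the real value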

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(      _map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(      _map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(   _map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)         { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(   _map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(   _map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c,C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c,_gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     require_atomic_access);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);
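
  // As an illustrative sketch, a jint field load might look like:
  //   Node* adr = basic_plus_adr(obj, offset_in_bytes);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT);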

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           require_atomic_access);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        bool require_atomic_access = false);
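
  // The matching store for the load sketch above (illustrative only):
  //   store_to_memory(control(), adr, val, T_INT, adr_type);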

  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise marks for objects.  We use precise marks
  // for unknown since we don't know if we have an array or an object
  // or even where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value.  QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt);

  // For the few cases where the barriers need special help
  void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
                   Node* val, const TypeOopPtr* val_type, BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver, which is in argument(0).
  Node* null_check_receiver(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    int nargs = 1 + callee->signature()->size();
    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when the primitive is inlined into a method
    // which handles NullPointerExceptions.
    Node* receiver = argument(0);
    _sp += nargs;
    receiver = do_null_check(receiver, T_OBJECT);
    _sp -= nargs;
    return receiver;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void  set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void  set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void  set_predefined_output_for_runtime_call(Node* call,
                                               Node* keep_mem,
                                               const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  void sync_kit(IdealKit& ideal);

  // vanilla/CMS post barrier
  void write_barrier_post(Node *store, Node* obj,
                          Node* adr,  uint adr_idx, Node* val, bool use_precise);

  // G1 pre/post barriers
  void g1_write_barrier_pre(Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt);

  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  // Helper function for g1
  private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);

  public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value:  no flags set
  };
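
  // The flags combine bitwise; a leaf call with no FP state and narrow
  // memory effects might be issued roughly as (an illustrative sketch,
  // not a real call site):
  //   make_runtime_call(RC_LEAF | RC_NO_FP | RC_NARROW_MEM,
  //                     call_type, call_addr, call_name, adr_type, parm0);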

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc *slow_call_type,
                      Node* slow_arg, klassOop ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof( Node *subobj, Node* superkls );

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);
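
  // An illustrative sketch of the calling idiom:
  //   Node* not_subtype_ctrl = gen_subtype_check(subklass, superklass);
  //   // control() now continues along the "is a subtype" path;
  //   // not_subtype_ctrl is the "is not a subtype" path.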

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool raw_mem_only);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     bool raw_mem_only = false,
                     Node* *return_size_val = NULL);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  bool raw_mem_only = false, Node* *return_size_val = NULL);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);// New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn)); // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);// New IfNode's
    _gvn.transform(iff);                           // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);     // Range-check and Null-check removal is later
    return iff;
  }
};
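
// A typical branch might be assembled with a sketch along these lines
// (illustrative only; see PreserveJVMState below for branch bookkeeping):
//   IfNode* iff = create_and_map_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
//   Node* if_true  = IfTrue(iff);
//   Node* if_false = IfFalse(iff);
//   set_control(if_true);   // continue the main flow on the taken path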

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // pre-order (PO) number of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
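
// An illustrative sketch of BuildCutout use (the trap reason is hypothetical):
// { BuildCutout unless(this, bol, PROB_MAX);
//   // taken only when 'bol' is false; must not fall through
//   uncommon_trap(Deoptimization::Reason_intrinsic,
//                 Deoptimization::Action_maybe_recompile);
// }
// // the main flow continues here on the "then" side of the test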

// Helper class to preserve the original _reexecute bit and _sp, and to
// restore them on destruction.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};

#endif // SHARE_VM_OPTO_GRAPHKIT_HPP