library_call.cpp revision 6444:47b707b6c4e7
/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/countbitsnode.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/idealKit.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
#include "trace/traceMacros.hpp"

class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 private:
  bool             _is_virtual;
  bool             _is_predicted;
  bool             _does_virtual_dispatch;
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _is_predicted(is_predicted),
      _does_virtual_dispatch(does_virtual_dispatch),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual bool is_predicted() const { return _is_predicted; }
  virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; }
  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  virtual Node* generate_predicate(JVMState* jvms);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
  Node*             _result;        // the result node, if any
  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted

  const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr* adr_type, bool is_native_ptr = false);

 public:
  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
    : GraphKit(jvms),
      _intrinsic(intrinsic),
      _result(NULL)
  {
    // Check if this is a root compile.  In that case we don't have a caller.
    if (!jvms->has_method()) {
      _reexecute_sp = sp();
    } else {
      // Find out how many arguments the interpreter needs when deoptimizing
      // and save the stack pointer value so it can be used by uncommon_trap.
      // We find the argument count by looking at the declared signature.
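      // For example, at an invokevirtual of String.compareTo(String) the
      // declared signature contributes one argument slot plus the receiver,
      // so nargs is 2 and the saved _reexecute_sp is sp() + 2.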
      bool ignored_will_link;
      ciSignature* declared_signature = NULL;
      ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
      const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
      _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
    }
  }

  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }

  ciMethod*         caller()    const    { return jvms()->method(); }
  int               bci()       const    { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const    { return _intrinsic->method(); }

  bool try_to_inline();
  Node* try_to_predicate();

  void push_result() {
    // Push the result onto the stack.
    if (!stopped() && result() != NULL) {
      BasicType bt = result()->bottom_type()->basic_type();
      push_node(bt, result());
    }
  }

 private:
  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
  }

  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
  void  set_result(RegionNode* region, PhiNode* value);
  Node*     result() { return _result; }

  virtual int reexecute_sp() { return _reexecute_sp; }

  // Helper functions to inline natives
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node* *pos_index = NULL);
  Node* generate_nonpositive_guard(Node* index, bool never_negative,
                                   // resulting CastII of index:
                                   Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  Node* generate_current_thread(Node* &tls_output);
  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
                              bool disjoint_bases, const char* &name, bool dest_uninitialized);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }
  Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact, bool is_static);

  Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2);
  Node* make_string_method_node(int opcode, Node* str1, Node* str2);
  bool inline_string_compareTo();
  bool inline_string_indexOf();
  Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
  bool inline_string_equals();
  Node* round_double_node(Node* n);
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_trig(vmIntrinsics::ID id);
  bool inline_math(vmIntrinsics::ID id);
  template <typename OverflowOp>
  bool inline_math_overflow(Node* arg1, Node* arg2);
  void inline_math_mathExact(Node* math, Node* test);
  bool inline_math_addExactI(bool is_increment);
  bool inline_math_addExactL(bool is_increment);
  bool inline_math_multiplyExactI();
  bool inline_math_multiplyExactL();
  bool inline_math_negateExactI();
  bool inline_math_negateExactL();
  bool inline_math_subtractExactI(bool is_decrement);
  bool inline_math_subtractExactL(bool is_decrement);
  bool inline_exp();
  bool inline_pow();
  Node* finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_min_max(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node* &base, Node* &offset);
  Node* make_unsafe_address(Node* base, Node* offset);
  // Helper for inline_unsafe_access.
  // Generates the guards that check whether the result of
  // Unsafe.getObject should be recorded in an SATB log buffer.
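  // (Typically this matters when Unsafe reads the referent field of a
  // java.lang.ref.Reference under G1; see inline_unsafe_access and
  // inline_reference_get.)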
  void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
  static bool klass_needs_init_guard(Node* kls);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
#ifdef TRACE_HAVE_INTRINSICS
  bool inline_native_classID();
  bool inline_native_threadID();
#endif
  bool inline_native_time_funcs(address method, const char* funcName);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();

  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  void generate_arraycopy(const TypePtr* adr_type,
                          BasicType basic_elem_type,
                          Node* src,  Node* src_offset,
                          Node* dest, Node* dest_offset,
                          Node* copy_length,
                          bool disjoint_bases = false,
                          bool length_never_negative = false,
                          RegionNode* slow_region = NULL);
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  void generate_clear_array(const TypePtr* adr_type,
                            Node* dest,
                            BasicType basic_elem_type,
                            Node* slice_off,
                            Node* slice_len,
                            Node* slice_end);
  bool generate_block_arraycopy(const TypePtr* adr_type,
                                BasicType basic_elem_type,
                                AllocateNode* alloc,
                                Node* src,  Node* src_offset,
                                Node* dest, Node* dest_offset,
                                Node* dest_size, bool dest_uninitialized);
  void generate_slow_arraycopy(const TypePtr* adr_type,
                               Node* src,  Node* src_offset,
                               Node* dest, Node* dest_offset,
                               Node* copy_length, bool dest_uninitialized);
  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
                                     Node* dest_elem_klass,
                                     Node* src,  Node* src_offset,
                                     Node* dest, Node* dest_offset,
                                     Node* copy_length, bool dest_uninitialized);
  Node* generate_generic_arraycopy(const TypePtr* adr_type,
                                   Node* src,  Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length, bool dest_uninitialized);
  void generate_unchecked_arraycopy(const TypePtr* adr_type,
                                    BasicType basic_elem_type,
                                    bool disjoint_bases,
                                    Node* src,  Node* src_offset,
                                    Node* dest, Node* dest_offset,
                                    Node* copy_length, bool dest_uninitialized);
  typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
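  // LS_xadd backs the getAndAdd* intrinsics, LS_xchg the getAndSet*
  // intrinsics, and LS_cmpxchg the compareAndSwap* intrinsics; see the
  // dispatch in try_to_inline() below.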
  bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind);
  bool inline_unsafe_ordered_store(BasicType type);
  bool inline_unsafe_fence(vmIntrinsics::ID id);
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_number_methods(vmIntrinsics::ID id);
  bool inline_reference_get();
  bool inline_aescrypt_Block(vmIntrinsics::ID id);
  bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
  Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
  Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
  bool inline_encodeISOArray();
  bool inline_updateCRC32();
  bool inline_updateBytesCRC32();
  bool inline_updateByteBufferCRC32();
};


//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (DisableIntrinsic[0] != '\0'
      && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
    // disabled by a user request on the command line:
    // example: -XX:DisableIntrinsic=_hashCode,_getClass
    return NULL;
  }

  if (!m->is_loaded()) {
    // do not attempt to inline unloaded methods
    return NULL;
  }

  // Only a few intrinsics implement a virtual dispatch.
  // They are expensive calls which are also frequently overridden.
  if (is_virtual) {
    switch (id) {
    case vmIntrinsics::_hashCode:
    case vmIntrinsics::_clone:
      // OK, Object.hashCode and Object.clone intrinsics come in both flavors
      break;
    default:
      return NULL;
    }
  }

  // -XX:-InlineNatives disables nearly all intrinsics:
  if (!InlineNatives) {
    switch (id) {
    case vmIntrinsics::_indexOf:
    case vmIntrinsics::_compareTo:
    case vmIntrinsics::_equals:
    case vmIntrinsics::_equalsC:
    case vmIntrinsics::_getAndAddInt:
    case vmIntrinsics::_getAndAddLong:
    case vmIntrinsics::_getAndSetInt:
    case vmIntrinsics::_getAndSetLong:
    case vmIntrinsics::_getAndSetObject:
    case vmIntrinsics::_loadFence:
    case vmIntrinsics::_storeFence:
    case vmIntrinsics::_fullFence:
      break;  // InlineNatives does not control these intrinsics
    case vmIntrinsics::_Reference_get:
      break;  // InlineNatives does not control Reference.get
    default:
      return NULL;
    }
  }

  bool is_predicted = false;
  bool does_virtual_dispatch = false;

  switch (id) {
  case vmIntrinsics::_compareTo:
    if (!SpecialStringCompareTo)  return NULL;
    if (!Matcher::match_rule_supported(Op_StrComp))  return NULL;
    break;
  case vmIntrinsics::_indexOf:
    if (!SpecialStringIndexOf)  return NULL;
    break;
  case vmIntrinsics::_equals:
    if (!SpecialStringEquals)  return NULL;
    if (!Matcher::match_rule_supported(Op_StrEquals))  return NULL;
    break;
  case vmIntrinsics::_equalsC:
    if (!SpecialArraysEquals)  return NULL;
    if (!Matcher::match_rule_supported(Op_AryEq))  return NULL;
    break;
  case vmIntrinsics::_arraycopy:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_copyMemory:
    if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_hashCode:
    if (!InlineObjectHash)  return NULL;
    does_virtual_dispatch = true;
    break;
  case vmIntrinsics::_clone:
    does_virtual_dispatch = true;
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
    if (!InlineObjectCopy)  return NULL;
    // These also use the arraycopy intrinsic mechanism:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_encodeISOArray:
    if (!SpecialEncodeISOArray)  return NULL;
    if (!Matcher::match_rule_supported(Op_EncodeISOArray))  return NULL;
    break;
  case vmIntrinsics::_checkIndex:
    // We do not intrinsify this.  The optimizer does fine with it.
    return NULL;

  case vmIntrinsics::_getCallerClass:
    if (!InlineReflectionGetCallerClass)  return NULL;
    if (SystemDictionary::reflect_CallerSensitive_klass() == NULL)  return NULL;
    break;

  case vmIntrinsics::_bitCount_i:
    if (!Matcher::match_rule_supported(Op_PopCountI)) return NULL;
    break;

  case vmIntrinsics::_bitCount_l:
    if (!Matcher::match_rule_supported(Op_PopCountL)) return NULL;
    break;

  case vmIntrinsics::_numberOfLeadingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosI)) return NULL;
    break;

  case vmIntrinsics::_numberOfLeadingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountLeadingZerosL)) return NULL;
    break;

  case vmIntrinsics::_numberOfTrailingZeros_i:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL;
    break;

  case vmIntrinsics::_numberOfTrailingZeros_l:
    if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
    break;

  case vmIntrinsics::_reverseBytes_c:
    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
    break;
  case vmIntrinsics::_reverseBytes_s:
    if (!Matcher::match_rule_supported(Op_ReverseBytesS))  return NULL;
    break;
  case vmIntrinsics::_reverseBytes_i:
    if (!Matcher::match_rule_supported(Op_ReverseBytesI))  return NULL;
    break;
  case vmIntrinsics::_reverseBytes_l:
    if (!Matcher::match_rule_supported(Op_ReverseBytesL))  return NULL;
    break;

  case vmIntrinsics::_Reference_get:
    // Use the intrinsic version of Reference.get() so that the value in
    // the referent field can be registered by the G1 pre-barrier code.
    // Also add a memory barrier to prevent commoning reads from this field
    // across safepoints, since the GC can change its value.
    break;

  case vmIntrinsics::_compareAndSwapObject:
#ifdef _LP64
    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
#endif
    break;

  case vmIntrinsics::_compareAndSwapLong:
    if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
    break;

  case vmIntrinsics::_getAndAddInt:
    if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
    break;

  case vmIntrinsics::_getAndAddLong:
    if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
    break;

  case vmIntrinsics::_getAndSetInt:
    if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
    break;

  case vmIntrinsics::_getAndSetLong:
    if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
    break;

  case vmIntrinsics::_getAndSetObject:
#ifdef _LP64
    if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
    if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
    break;
#else
    if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
    break;
#endif

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:
    if (!UseAESIntrinsics) return NULL;
    break;

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    if (!UseAESIntrinsics) return NULL;
    // these two require the predicated logic
    is_predicted = true;
    break;

  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    if (!UseCRC32Intrinsics) return NULL;
    break;

  case vmIntrinsics::_incrementExactI:
  case vmIntrinsics::_addExactI:
    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_incrementExactL:
  case vmIntrinsics::_addExactL:
    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_decrementExactI:
  case vmIntrinsics::_subtractExactI:
    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_decrementExactL:
  case vmIntrinsics::_subtractExactL:
    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_negateExactI:
    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_negateExactL:
    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyExactI:
    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
    break;
  case vmIntrinsics::_multiplyExactL:
    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
    break;

  default:
    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    break;
  }

  // -XX:-InlineClassNatives disables natives from the Class class.
  // The flag applies to all reflective calls, notably Array.newArray
  // (visible to Java programmers as Array.newInstance).
  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
    if (!InlineClassNatives)  return NULL;
  }

  // -XX:-InlineThreadNatives disables natives from the Thread class.
  if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
    if (!InlineThreadNatives)  return NULL;
  }

  // -XX:-InlineMathNatives disables natives from the Math, Float, and Double classes.
  if (m->holder()->name() == ciSymbol::java_lang_Math() ||
      m->holder()->name() == ciSymbol::java_lang_Float() ||
      m->holder()->name() == ciSymbol::java_lang_Double()) {
    if (!InlineMathNatives)  return NULL;
  }

  // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
  if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
    if (!InlineUnsafeOps)  return NULL;
  }

  return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id);
}

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  // Try to inline the intrinsic.
  if (kit.try_to_inline()) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    // Push the result from the inlined method onto the stack.
    kit.push_result();
    C->print_inlining_update(this);
    return kit.transfer_exceptions_into_jvms();
  }

  // The intrinsic bailed out
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      // Not a root compile.
      const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
    } else {
      // Root compile
      tty->print("Did not generate intrinsic %s%s at bci:%d in",
               vmIntrinsics::name_at(intrinsic_id()),
               (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  C->print_inlining_update(this);
  return NULL;
}

Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  assert(is_predicted(), "sanity");
  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Predicate for intrinsic %s", str);
  }
#endif
  ciMethod* callee = kit.callee();
  const int bci    = kit.bci();

  Node* slow_ctl = kit.try_to_predicate();
  if (!kit.failing()) {
    if (C->print_intrinsics() || C->print_inlining()) {
      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return slow_ctl; // Could be NULL if the check folds.
  }

  // The intrinsic bailed out
  if (C->print_intrinsics() || C->print_inlining()) {
    if (jvms->has_method()) {
      // Not a root compile.
      const char* msg = "failed to generate predicate for intrinsic";
      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
    } else {
      // Root compile
      C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
                                        vmIntrinsics::name_at(intrinsic_id()),
                                        (is_virtual() ? " (virtual)" : ""), bci);
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline() {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_native_ptr  = true;
  const bool is_static      = true;
  const bool is_volatile    = true;

  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");


  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false,         is_static);
  case vmIntrinsics::_getClass:                 return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:                     return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());

  case vmIntrinsics::_addExactI:                return inline_math_addExactI(false /* add */);
  case vmIntrinsics::_addExactL:                return inline_math_addExactL(false /* add */);
  case vmIntrinsics::_decrementExactI:          return inline_math_subtractExactI(true /* decrement */);
  case vmIntrinsics::_decrementExactL:          return inline_math_subtractExactL(true /* decrement */);
  case vmIntrinsics::_incrementExactI:          return inline_math_addExactI(true /* increment */);
  case vmIntrinsics::_incrementExactL:          return inline_math_addExactL(true /* increment */);
  case vmIntrinsics::_multiplyExactI:           return inline_math_multiplyExactI();
  case vmIntrinsics::_multiplyExactL:           return inline_math_multiplyExactL();
  case vmIntrinsics::_negateExactI:             return inline_math_negateExactI();
  case vmIntrinsics::_negateExactL:             return inline_math_negateExactL();
  case vmIntrinsics::_subtractExactI:           return inline_math_subtractExactI(false /* subtract */);
  case vmIntrinsics::_subtractExactL:           return inline_math_subtractExactL(false /* subtract */);

  case vmIntrinsics::_arraycopy:                return inline_arraycopy();

  case vmIntrinsics::_compareTo:                return inline_string_compareTo();
  case vmIntrinsics::_indexOf:                  return inline_string_indexOf();
  case vmIntrinsics::_equals:                   return inline_string_equals();

  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile);
  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);

  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile);
  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile);
  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);

  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);

  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile);
  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile);
  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile);
  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile);
  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile);
  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile);

  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);

  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);

  case vmIntrinsics::_prefetchRead:             return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
  case vmIntrinsics::_prefetchWrite:            return inline_unsafe_prefetch(!is_native_ptr,  is_store, !is_static);
  case vmIntrinsics::_prefetchReadStatic:       return inline_unsafe_prefetch(!is_native_ptr, !is_store,  is_static);
  case vmIntrinsics::_prefetchWriteStatic:      return inline_unsafe_prefetch(!is_native_ptr,  is_store,  is_static);

  case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
  case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);

  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);

  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
  case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);

  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());

  case vmIntrinsics::_currentThread:            return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();

#ifdef TRACE_HAVE_INTRINSICS
  case vmIntrinsics::_classID:                  return inline_native_classID();
  case vmIntrinsics::_threadID:                 return inline_native_threadID();
  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
#endif
  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray:                 return inline_native_newArray();
  case vmIntrinsics::_getLength:                return inline_native_getLength();
  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
  case vmIntrinsics::_equalsC:                  return inline_array_equals();
  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getComponentType:
  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
  case vmIntrinsics::_reverseBytes_s:
  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());

  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();

  case vmIntrinsics::_Reference_get:            return inline_reference_get();

  case vmIntrinsics::_aescrypt_encryptBlock:
  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());

  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt(intrinsic_id());

  case vmIntrinsics::_encodeISOArray:
    return inline_encodeISOArray();

  case vmIntrinsics::_updateCRC32:
    return inline_updateCRC32();
  case vmIntrinsics::_updateBytesCRC32:
    return inline_updateBytesCRC32();
  case vmIntrinsics::_updateByteBufferCRC32:
    return inline_updateByteBufferCRC32();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

Node* LibraryCallKit::try_to_predicate() {
  if (!jvms()->has_method()) {
    // Root JVMState has a null method.
    assert(map()->memory()->Opcode() == Op_Parm, "");
    // Insert the memory aliasing node
    set_all_memory(reset_memory());
  }
  assert(merged_memory(), "");

  switch (intrinsic_id()) {
  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(false);
  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
    return inline_cipherBlockChaining_AESCrypt_predicate(true);

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented predicate for intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    Node* slow_ctl = control();
    set_control(top()); // No fast path intrinsic
    return slow_ctl;
  }
}

//------------------------------set_result-------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  set_result(_gvn.transform(value));
  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
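//
// A typical use, sketched with illustrative names (compare the pattern
// in generate_negative_guard below):
//
//   Node* bol     = _gvn.transform(new (C) BoolNode(cmp, BoolTest::lt));
//   Node* is_slow = generate_slow_guard(bol, slow_region);
//   if (is_slow != NULL) { ... emit slow-path code under is_slow ... }
//   // control() now continues along the fast path.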
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff));
  if (if_slow == top()) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff));
  set_control(if_fast);

  return if_slow;
}

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0)));
  Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C) CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
                                                        Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0)));
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq));
  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
  if (is_notp != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C) CastIINode(index, TypeInt::POS1);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_notp;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable.  Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
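//
// A worked example of Plan A's overflow robustness: offset and
// copyLength are both non-negative 32-bit ints, so their sum cannot wrap
// past 2^32.  With offset == 0x7ffffff0 and copyLength == 0x20 the
// signed sum overflows to a negative int, but reinterpreted as unsigned
// it is 0x80000010, which exceeds every valid array length, so the
// unsigned compare (and hence the guard) still catches it.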
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && subseq_length->eqv_uncast(array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform(new (C) AddINode(last, offset));
  Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last));
  Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt));
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}


//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new (C) ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
  tls_output = thread;
  return threadObj;
}


//------------------------------make_string_method_node------------------------
// Helper method for String intrinsic functions. This version is called
// with str1 and str2 pointing to String object nodes.
//
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1, Node* str2) {
  Node* no_ctrl = NULL;

  // Get start addr of string
  Node* str1_value   = load_String_value(no_ctrl, str1);
  Node* str1_offset  = load_String_offset(no_ctrl, str1);
  Node* str1_start   = array_element_address(str1_value, str1_offset, T_CHAR);

  // Get length of string 1
  Node* str1_len  = load_String_length(no_ctrl, str1);

  Node* str2_value   = load_String_value(no_ctrl, str2);
  Node* str2_offset  = load_String_offset(no_ctrl, str2);
  Node* str2_start   = array_element_address(str2_value, str2_offset, T_CHAR);

  Node* str2_len = NULL;
  Node* result = NULL;

  switch (opcode) {
  case Op_StrIndexOf:
    // Get length of string 2
    str2_len = load_String_length(no_ctrl, str2);

    result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, str1_len, str2_start, str2_len);
    break;
  case Op_StrComp:
    // Get length of string 2
    str2_len = load_String_length(no_ctrl, str2);

    result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, str1_len, str2_start, str2_len);
    break;
  case Op_StrEquals:
    result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                               str1_start, str2_start, str1_len);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  return _gvn.transform(result);
}

// Helper method for String intrinsic functions. This version is called
// with str1_start and str2_start pointing to char[] nodes, and cnt1 and
// cnt2 pointing to Int nodes containing the lengths of str1 and str2.
//
Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
  Node* result = NULL;
  switch (opcode) {
  case Op_StrIndexOf:
    result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, cnt1, str2_start, cnt2);
    break;
  case Op_StrComp:
    result = new (C) StrCompNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, cnt1, str2_start, cnt2);
    break;
  case Op_StrEquals:
    result = new (C) StrEqualsNode(control(), memory(TypeAryPtr::CHARS),
                                 str1_start, str2_start, cnt1);
    break;
  default:
    ShouldNotReachHere();
    return NULL;
  }

  // All these intrinsics have checks.
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  return _gvn.transform(result);
}

//------------------------------inline_string_compareTo------------------------
// public int java.lang.String.compareTo(String anotherString);
bool LibraryCallKit::inline_string_compareTo() {
  Node* receiver = null_check(argument(0));
  Node* arg      = null_check(argument(1));
  if (stopped()) {
    return true;
  }
  set_result(make_string_method_node(Op_StrComp, receiver, arg));
  return true;
}

1170//------------------------------inline_string_equals------------------------
1171bool LibraryCallKit::inline_string_equals() {
1172  Node* receiver = null_check_receiver();
1173  // NOTE: Do not null check the argument for String.equals() because the
1174  //       spec allows NULL to be passed as the argument.
1175  Node* argument = this->argument(1);
1176  if (stopped()) {
1177    return true;
1178  }
1179
1180  // paths (plus control) merge
1181  RegionNode* region = new (C) RegionNode(5);
1182  Node* phi = new (C) PhiNode(region, TypeInt::BOOL);
1183
1184  // does source == target string?
1185  Node* cmp = _gvn.transform(new (C) CmpPNode(receiver, argument));
1186  Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::eq));
1187
1188  Node* if_eq = generate_slow_guard(bol, NULL);
1189  if (if_eq != NULL) {
1190    // receiver == argument
1191    phi->init_req(2, intcon(1));
1192    region->init_req(2, if_eq);
1193  }
1194
1195  // get String klass for instanceOf
1196  ciInstanceKlass* klass = env()->String_klass();
1197
1198  if (!stopped()) {
1199    Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
1200    Node* cmp  = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
1201    Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
1202
1203    Node* inst_false = generate_guard(bol, NULL, PROB_MIN);
1204    // instanceOf == true, fall through
1205
1206    if (inst_false != NULL) {
1207      phi->init_req(3, intcon(0));
1208      region->init_req(3, inst_false);
1209    }
1210  }
1211
1212  if (!stopped()) {
1213    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass);
1214
1215    // Properly cast the argument to String
1216    argument = _gvn.transform(new (C) CheckCastPPNode(control(), argument, string_type));
1217    // This path is taken only when argument's type is String:NotNull.
1218    argument = cast_not_null(argument, false);
1219
1220    Node* no_ctrl = NULL;
1221
1222    // Get start addr of receiver
1223    Node* receiver_val    = load_String_value(no_ctrl, receiver);
1224    Node* receiver_offset = load_String_offset(no_ctrl, receiver);
1225    Node* receiver_start = array_element_address(receiver_val, receiver_offset, T_CHAR);
1226
1227    // Get length of receiver
1228    Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
1229
1230    // Get start addr of argument
1231    Node* argument_val    = load_String_value(no_ctrl, argument);
1232    Node* argument_offset = load_String_offset(no_ctrl, argument);
1233    Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
1234
1235    // Get length of argument
1236    Node* argument_cnt  = load_String_length(no_ctrl, argument);
1237
1238    // Check for receiver count != argument count
1239    Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt));
1240    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne));
1241    Node* if_ne = generate_slow_guard(bol, NULL);
1242    if (if_ne != NULL) {
1243      phi->init_req(4, intcon(0));
1244      region->init_req(4, if_ne);
1245    }
1246
1247    // The check for count == 0 is done by the assembler code for StrEquals.
1248
1249    if (!stopped()) {
1250      Node* equals = make_string_method_node(Op_StrEquals, receiver_start, receiver_cnt, argument_start, argument_cnt);
1251      phi->init_req(1, equals);
1252      region->init_req(1, control());
1253    }
1254  }
1255
1256  // post merge
1257  set_control(_gvn.transform(region));
1258  record_for_igvn(region);
1259
1260  set_result(_gvn.transform(phi));
1261  return true;
1262}
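
// The region/phi paths built above correspond to this Java-level sketch of
// String.equals (the argument may legally be NULL, hence no null check on it):
//
//   if (this == arg)              return true;   // path 2: reference equality
//   if (!(arg instanceof String)) return false;  // path 3: also covers arg == null
//   if (this.count != arg.count)  return false;  // path 4: length mismatch
//   return StrEquals(this.value..., arg.value..., count);  // path 1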
1263
1264//------------------------------inline_array_equals----------------------------
1265bool LibraryCallKit::inline_array_equals() {
1266  Node* arg1 = argument(0);
1267  Node* arg2 = argument(1);
1268  set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
1269  return true;
1270}
1271
1272// Java version of String.indexOf(constant string)
1273// class StringDecl {
1274//   StringDecl(char[] ca) {
1275//     offset = 0;
1276//     count = ca.length;
1277//     value = ca;
1278//   }
1279//   int offset;
1280//   int count;
1281//   char[] value;
1282// }
1283//
1284// static int string_indexOf_J(StringDecl string_object, char[] target_object,
1285//                             int targetOffset, int cache_i, int md2) {
1286//   int cache = cache_i;
1287//   int sourceOffset = string_object.offset;
1288//   int sourceCount = string_object.count;
1289//   int targetCount = target_object.length;
1290//
1291//   int targetCountLess1 = targetCount - 1;
1292//   int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
1293//
1294//   char[] source = string_object.value;
1295//   char[] target = target_object;
1296//   int lastChar = target[targetCountLess1];
1297//
1298//  outer_loop:
1299//   for (int i = sourceOffset; i < sourceEnd; ) {
1300//     int src = source[i + targetCountLess1];
1301//     if (src == lastChar) {
1302//       // With random strings and a 4-character alphabet,
1303//       // reverse matching at this point sets up 0.8% fewer
1304//       // frames, but (paradoxically) makes 0.3% more probes.
1305//       // Since those probes are nearer the lastChar probe,
1306//       // there may be a net D$ win with reverse matching.
1307//       // But reversing the loop inhibits unrolling of the inner loop
1308//       // for an unknown reason.  So does running the outer loop from
1309//       // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount)
1310//       for (int j = 0; j < targetCountLess1; j++) {
1311//         if (target[targetOffset + j] != source[i+j]) {
1312//           if ((cache & (1 << source[i+j])) == 0) {
1313//             if (md2 < j+1) {
1314//               i += j+1;
1315//               continue outer_loop;
1316//             }
1317//           }
1318//           i += md2;
1319//           continue outer_loop;
1320//         }
1321//       }
1322//       return i - sourceOffset;
1323//     }
1324//     if ((cache & (1 << src)) == 0) {
1325//       i += targetCountLess1;
1326//     } // using "i += targetCount;" and an "else i++;" causes a jump to jump.
1327//     i++;
1328//   }
1329//   return -1;
1330// }
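
// A note on the two precomputed parameters (derived from the constant
// pattern in inline_string_indexOf() below):
//   cache - a 32-bit filter with one bit set per character occurring in the
//           first targetCount-1 pattern positions (indexed by the char's low
//           5 bits); a clear bit proves the char is absent from those
//           positions, so the scan may skip ahead safely.
//   md2   - the shift applied when the last pattern char matches but an
//           earlier one does not: the distance from the rightmost earlier
//           occurrence of the last char to the end of the pattern.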
1331
1332//------------------------------string_indexOf------------------------
1333Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
1334                                     jint cache_i, jint md2_i) {
1335
1336  Node* no_ctrl  = NULL;
1337  float likely   = PROB_LIKELY(0.9);
1338  float unlikely = PROB_UNLIKELY(0.9);
1339
1340  const int nargs = 0; // no arguments to push back for uncommon trap in predicate
1341
1342  Node* source        = load_String_value(no_ctrl, string_object);
1343  Node* sourceOffset  = load_String_offset(no_ctrl, string_object);
1344  Node* sourceCount   = load_String_length(no_ctrl, string_object);
1345
1346  Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)));
1347  jint target_length = target_array->length();
1348  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
1349  const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
1350
1351  // String.value field is known to be @Stable.
1352  if (UseImplicitStableValues) {
1353    target = cast_array_to_stable(target, target_type);
1354  }
1355
1356  IdealKit kit(this, false, true);
1357#define __ kit.
1358  Node* zero             = __ ConI(0);
1359  Node* one              = __ ConI(1);
1360  Node* cache            = __ ConI(cache_i);
1361  Node* md2              = __ ConI(md2_i);
1362  Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
1363  Node* targetCount      = __ ConI(target_length);
1364  Node* targetCountLess1 = __ ConI(target_length - 1);
1365  Node* targetOffset     = __ ConI(targetOffset_i);
1366  Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);
1367
1368  IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
1369  Node* outer_loop = __ make_label(2 /* goto */);
1370  Node* return_    = __ make_label(1);
1371
1372  __ set(rtn,__ ConI(-1));
1373  __ loop(this, nargs, i, sourceOffset, BoolTest::lt, sourceEnd); {
1374       Node* i2  = __ AddI(__ value(i), targetCountLess1);
1375       // pin to prohibit loading the "next iteration" value, which may SEGV (rare)
1376       Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
1377       __ if_then(src, BoolTest::eq, lastChar, unlikely); {
1378         __ loop(this, nargs, j, zero, BoolTest::lt, targetCountLess1); {
1379              Node* tpj = __ AddI(targetOffset, __ value(j));
1380              Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
1381              Node* ipj  = __ AddI(__ value(i), __ value(j));
1382              Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
1383              __ if_then(targ, BoolTest::ne, src2); {
1384                __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
1385                  __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
1386                    __ increment(i, __ AddI(__ value(j), one));
1387                    __ goto_(outer_loop);
1388                  } __ end_if(); __ dead(j);
1389                }__ end_if(); __ dead(j);
1390                __ increment(i, md2);
1391                __ goto_(outer_loop);
1392              }__ end_if();
1393              __ increment(j, one);
1394         }__ end_loop(); __ dead(j);
1395         __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
1396         __ goto_(return_);
1397       }__ end_if();
1398       __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
1399         __ increment(i, targetCountLess1);
1400       }__ end_if();
1401       __ increment(i, one);
1402       __ bind(outer_loop);
1403  }__ end_loop(); __ dead(i);
1404  __ bind(return_);
1405
1406  // Final sync IdealKit and GraphKit.
1407  final_sync(kit);
1408  Node* result = __ value(rtn);
1409#undef __
1410  C->set_has_loops(true);
1411  return result;
1412}
1413
1414//------------------------------inline_string_indexOf------------------------
1415bool LibraryCallKit::inline_string_indexOf() {
1416  Node* receiver = argument(0);
1417  Node* arg      = argument(1);
1418
1419  Node* result;
1420  // Disable the use of pcmpestri until it can be guaranteed that
1421  // the load doesn't cross into the uncommitted space.
1422  if (Matcher::has_match_rule(Op_StrIndexOf) &&
1423      UseSSE42Intrinsics) {
1424    // Generate SSE4.2 version of indexOf
1425    // We currently only have match rules that use SSE4.2
1426
1427    receiver = null_check(receiver);
1428    arg      = null_check(arg);
1429    if (stopped()) {
1430      return true;
1431    }
1432
1433    ciInstanceKlass* str_klass = env()->String_klass();
1434    const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass);
1435
1436    // Make the merge point
1437    RegionNode* result_rgn = new (C) RegionNode(4);
1438    Node*       result_phi = new (C) PhiNode(result_rgn, TypeInt::INT);
1439    Node* no_ctrl  = NULL;
1440
1441    // Get start addr of source string
1442    Node* source = load_String_value(no_ctrl, receiver);
1443    Node* source_offset = load_String_offset(no_ctrl, receiver);
1444    Node* source_start = array_element_address(source, source_offset, T_CHAR);
1445
1446    // Get length of source string
1447    Node* source_cnt  = load_String_length(no_ctrl, receiver);
1448
1449    // Get start addr of substring
1450    Node* substr = load_String_value(no_ctrl, arg);
1451    Node* substr_offset = load_String_offset(no_ctrl, arg);
1452    Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);
1453
1454    // Get length of substring
1455    Node* substr_cnt  = load_String_length(no_ctrl, arg);
1456
1457    // Check for substr count > string count
1458    Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt));
1459    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt));
1460    Node* if_gt = generate_slow_guard(bol, NULL);
1461    if (if_gt != NULL) {
1462      result_phi->init_req(2, intcon(-1));
1463      result_rgn->init_req(2, if_gt);
1464    }
1465
1466    if (!stopped()) {
1467      // Check for substr count == 0
1468      cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0)));
1469      bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
1470      Node* if_zero = generate_slow_guard(bol, NULL);
1471      if (if_zero != NULL) {
1472        result_phi->init_req(3, intcon(0));
1473        result_rgn->init_req(3, if_zero);
1474      }
1475    }
1476
1477    if (!stopped()) {
1478      result = make_string_method_node(Op_StrIndexOf, source_start, source_cnt, substr_start, substr_cnt);
1479      result_phi->init_req(1, result);
1480      result_rgn->init_req(1, control());
1481    }
1482    set_control(_gvn.transform(result_rgn));
1483    record_for_igvn(result_rgn);
1484    result = _gvn.transform(result_phi);
1485
1486  } else { // Use LibraryCallKit::string_indexOf
1487    // Don't intrinsify if the argument isn't a constant string.
1488    if (!arg->is_Con()) {
1489     return false;
1490    }
1491    const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr();
1492    if (str_type == NULL) {
1493      return false;
1494    }
1495    ciInstanceKlass* klass = env()->String_klass();
1496    ciObject* str_const = str_type->const_oop();
1497    if (str_const == NULL || str_const->klass() != klass) {
1498      return false;
1499    }
1500    ciInstance* str = str_const->as_instance();
1501    assert(str != NULL, "must be instance");
1502
1503    ciObject* v = str->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
1504    ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array
1505
1506    int o;
1507    int c;
1508    if (java_lang_String::has_offset_field()) {
1509      o = str->field_value_by_offset(java_lang_String::offset_offset_in_bytes()).as_int();
1510      c = str->field_value_by_offset(java_lang_String::count_offset_in_bytes()).as_int();
1511    } else {
1512      o = 0;
1513      c = pat->length();
1514    }
1515
1516    // Constant strings have no offset and count == length, which
1517    // simplifies the resulting code somewhat, so let's optimize for that.
1518    if (o != 0 || c != pat->length()) {
1519     return false;
1520    }
1521
1522    receiver = null_check(receiver, T_OBJECT);
1523    // NOTE: No null check on the argument is needed since it's a constant String oop.
1524    if (stopped()) {
1525      return true;
1526    }
1527
1528    // An empty pattern string always matches at the beginning of the string, so return 0.
1529    if (c == 0) {
1530      set_result(intcon(0));
1531      return true;
1532    }
1533
1534    // Generate default indexOf
1535    jchar lastChar = pat->char_at(o + (c - 1));
1536    int cache = 0;
1537    int i;
1538    for (i = 0; i < c - 1; i++) {
1539      assert(i < pat->length(), "out of range");
1540      cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
1541    }
1542
1543    int md2 = c;
1544    for (i = 0; i < c - 1; i++) {
1545      assert(i < pat->length(), "out of range");
1546      if (pat->char_at(o + i) == lastChar) {
1547        md2 = (c - 1) - i;
1548      }
1549    }
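
    // Worked example (a sketch) for the constant pattern "aba" (c == 3):
    //   lastChar = 'a'
    //   cache    = bits for 'a' and 'b' (the characters at indices 0 .. c-2)
    //   md2      = (c-1) - 0 = 2, since the rightmost earlier 'a' is at index 0
    // A partial match that fails can then advance the search position by 2.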
1550
1551    result = string_indexOf(receiver, pat, o, cache, md2);
1552  }
1553  set_result(result);
1554  return true;
1555}
1556
1557//--------------------------round_double_node--------------------------------
1558// Round a double node if necessary.
1559Node* LibraryCallKit::round_double_node(Node* n) {
1560  if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
1561    n = _gvn.transform(new (C) RoundDoubleNode(0, n));
1562  return n;
1563}
1564
1565//------------------------------inline_math-----------------------------------
1566// public static double Math.abs(double)
1567// public static double Math.sqrt(double)
1568// public static double Math.log(double)
1569// public static double Math.log10(double)
1570bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
1571  Node* arg = round_double_node(argument(0));
1572  Node* n;
1573  switch (id) {
1574  case vmIntrinsics::_dabs:   n = new (C) AbsDNode(                arg);  break;
1575  case vmIntrinsics::_dsqrt:  n = new (C) SqrtDNode(C, control(),  arg);  break;
1576  case vmIntrinsics::_dlog:   n = new (C) LogDNode(C, control(),   arg);  break;
1577  case vmIntrinsics::_dlog10: n = new (C) Log10DNode(C, control(), arg);  break;
1578  default:  fatal_unexpected_iid(id);  break;
1579  }
1580  set_result(_gvn.transform(n));
1581  return true;
1582}
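
// For example, Math.sqrt(x) compiles down to the single SqrtDNode created
// above, provided the platform supplies a match rule for Op_SqrtD; the
// dispatch in inline_math_native() below makes that decision.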
1583
1584//------------------------------inline_trig----------------------------------
1585// Inline sin/cos/tan instructions, if possible.  If rounding is required, do
1586// argument reduction which will turn into a fast/slow diamond.
1587bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
1588  Node* arg = round_double_node(argument(0));
1589  Node* n = NULL;
1590
1591  switch (id) {
1592  case vmIntrinsics::_dsin:  n = new (C) SinDNode(C, control(), arg);  break;
1593  case vmIntrinsics::_dcos:  n = new (C) CosDNode(C, control(), arg);  break;
1594  case vmIntrinsics::_dtan:  n = new (C) TanDNode(C, control(), arg);  break;
1595  default:  fatal_unexpected_iid(id);  break;
1596  }
1597  n = _gvn.transform(n);
1598
1599  // Rounding required?  Check for argument reduction!
1600  if (Matcher::strict_fp_requires_explicit_rounding) {
1601    static const double     pi_4 =  0.7853981633974483;
1602    static const double neg_pi_4 = -0.7853981633974483;
1603    // pi/2 in 80-bit extended precision
1604    // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
1605    // -pi/2 in 80-bit extended precision
1606    // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
1607    // Cutoff value for using this argument reduction technique
1608    //static const double    pi_2_minus_epsilon =  1.564660403643354;
1609    //static const double neg_pi_2_plus_epsilon = -1.564660403643354;
1610
1611    // Pseudocode for sin:
1612    // if (x <= Math.PI / 4.0) {
1613    //   if (x >= -Math.PI / 4.0) return  fsin(x);
1614    //   if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
1615    // } else {
1616    //   if (x <=  Math.PI / 2.0) return  fcos(x - Math.PI / 2.0);
1617    // }
1618    // return StrictMath.sin(x);
1619
1620    // Pseudocode for cos:
1621    // if (x <= Math.PI / 4.0) {
1622    //   if (x >= -Math.PI / 4.0) return  fcos(x);
1623    //   if (x >= -Math.PI / 2.0) return  fsin(x + Math.PI / 2.0);
1624    // } else {
1625    //   if (x <=  Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
1626    // }
1627    // return StrictMath.cos(x);
1628
1629    // Actually, sticking an 80-bit Intel value into C2 will be tough; it
1630    // requires a special machine instruction to load it.  Instead we'll try
1631    // the 'easy' case.  If we really need the extra range +/- PI/2 we'll
1632    // probably do the math inside the SIN encoding.
1633
1634    // Make the merge point
1635    RegionNode* r = new (C) RegionNode(3);
1636    Node* phi = new (C) PhiNode(r, Type::DOUBLE);
1637
1638    // Flatten arg so we need only 1 test
1639    Node *abs = _gvn.transform(new (C) AbsDNode(arg));
1640    // Node for PI/4 constant
1641    Node *pi4 = makecon(TypeD::make(pi_4));
1642    // Check PI/4 : abs(arg)
1643    Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs));
1644    // Check: If PI/4 < abs(arg) then go slow
1645    Node *bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::lt ));
1646    // Branch either way
1647    IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1648    set_control(opt_iff(r,iff));
1649
1650    // Set fast path result
1651    phi->init_req(2, n);
1652
1653    // Slow path - non-blocking leaf call
1654    Node* call = NULL;
1655    switch (id) {
1656    case vmIntrinsics::_dsin:
1657      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1658                               CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
1659                               "Sin", NULL, arg, top());
1660      break;
1661    case vmIntrinsics::_dcos:
1662      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1663                               CAST_FROM_FN_PTR(address, SharedRuntime::dcos),
1664                               "Cos", NULL, arg, top());
1665      break;
1666    case vmIntrinsics::_dtan:
1667      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1668                               CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
1669                               "Tan", NULL, arg, top());
1670      break;
1671    }
1672    assert(control()->in(0) == call, "");
1673    Node* slow_result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
1674    r->init_req(1, control());
1675    phi->init_req(1, slow_result);
1676
1677    // Post-merge
1678    set_control(_gvn.transform(r));
1679    record_for_igvn(r);
1680    n = _gvn.transform(phi);
1681
1682    C->set_has_split_ifs(true); // Has chance for split-if optimization
1683  }
1684  set_result(n);
1685  return true;
1686}
1687
1688Node* LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) {
1689  //-------------------
1690  // result = result.isNaN() ? funcAddr() : result;
1691  // Check for NaN by testing result != result; if the result is NaN,
1692  // either deoptimize via an uncommon trap or fall back to a runtime call.
1693  Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result, result));
1694  // Build the boolean node
1695  Node* bolisnum = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::eq));
1696
1697  if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1698    { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1699      // The pow or exp intrinsic returned a NaN, which requires a call
1700      // to the runtime.  Recompile with the runtime call.
1701      uncommon_trap(Deoptimization::Reason_intrinsic,
1702                    Deoptimization::Action_make_not_entrant);
1703    }
1704    return result;
1705  } else {
1706    // If this inlining ever returned NaN in the past, we compile a call
1707    // to the runtime to properly handle corner cases
1708
1709    IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1710    Node* if_slow = _gvn.transform(new (C) IfFalseNode(iff));
1711    Node* if_fast = _gvn.transform(new (C) IfTrueNode(iff));
1712
1713    if (!if_slow->is_top()) {
1714      RegionNode* result_region = new (C) RegionNode(3);
1715      PhiNode*    result_val = new (C) PhiNode(result_region, Type::DOUBLE);
1716
1717      result_region->init_req(1, if_fast);
1718      result_val->init_req(1, result);
1719
1720      set_control(if_slow);
1721
1722      const TypePtr* no_memory_effects = NULL;
1723      Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1724                                   no_memory_effects,
1725                                   x, top(), y, y ? top() : NULL);
1726      Node* value = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+0));
1727#ifdef ASSERT
1728      Node* value_top = _gvn.transform(new (C) ProjNode(rt, TypeFunc::Parms+1));
1729      assert(value_top == top(), "second value must be top");
1730#endif
1731
1732      result_region->init_req(2, control());
1733      result_val->init_req(2, value);
1734      set_control(_gvn.transform(result_region));
1735      return _gvn.transform(result_val);
1736    } else {
1737      return result;
1738    }
1739  }
1740}
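
// Net effect of finish_pow_exp (a sketch):
//
//   result = isNaN(result) ? funcAddr(x [, y]) : result;
//
// realized either as an uncommon trap that triggers recompilation with the
// runtime call (the first few times), or as an explicit fast/slow diamond
// once such traps have fired too often.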
1741
1742//------------------------------inline_exp-------------------------------------
1743// Inline exp instructions, if possible.  The Intel hardware only misses
1744// really odd corner cases (+/- Infinity).  Just uncommon-trap them.
1745bool LibraryCallKit::inline_exp() {
1746  Node* arg = round_double_node(argument(0));
1747  Node* n   = _gvn.transform(new (C) ExpDNode(C, control(), arg));
1748
1749  n = finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1750  set_result(n);
1751
1752  C->set_has_split_ifs(true); // Has chance for split-if optimization
1753  return true;
1754}
1755
1756//------------------------------inline_pow-------------------------------------
1757// Inline power instructions, if possible.
1758bool LibraryCallKit::inline_pow() {
1759  // Pseudocode for pow
1760  // if (y == 2) {
1761  //   return x * x;
1762  // } else {
1763  //   if (x <= 0.0) {
1764  //     long longy = (long)y;
1765  //     if ((double)longy == y) { // if y is long
1766  //       if (y + 1 == y) longy = 0; // huge number: even
1767//       result = ((1&longy) == 0) ? DPow(abs(x), y) : -DPow(abs(x), y);
1768  //     } else {
1769  //       result = NaN;
1770  //     }
1771  //   } else {
1772  //     result = DPow(x,y);
1773  //   }
1774//   if (result != result) { // i.e. if result is NaN
1775  //     result = uncommon_trap() or runtime_call();
1776  //   }
1777  //   return result;
1778  // }
1779
1780  Node* x = round_double_node(argument(0));
1781  Node* y = round_double_node(argument(2));
1782
1783  Node* result = NULL;
1784
1785  Node*   const_two_node = makecon(TypeD::make(2.0));
1786  Node*   cmp_node       = _gvn.transform(new (C) CmpDNode(y, const_two_node));
1787  Node*   bool_node      = _gvn.transform(new (C) BoolNode(cmp_node, BoolTest::eq));
1788  IfNode* if_node        = create_and_xform_if(control(), bool_node, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1789  Node*   if_true        = _gvn.transform(new (C) IfTrueNode(if_node));
1790  Node*   if_false       = _gvn.transform(new (C) IfFalseNode(if_node));
1791
1792  RegionNode* region_node = new (C) RegionNode(3);
1793  region_node->init_req(1, if_true);
1794
1795  Node* phi_node = new (C) PhiNode(region_node, Type::DOUBLE);
1796  // special case for x^y where y == 2, we can convert it to x * x
1797  phi_node->init_req(1, _gvn.transform(new (C) MulDNode(x, x)));
1798
1799  // set control to if_false since we will now process the false branch
1800  set_control(if_false);
1801
1802  if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
1803    // Short form: skip the fancy tests and just check for NaN result.
1804    result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
1805  } else {
1806    // If this inlining ever returned NaN in the past, include all
1807    // checks + call to the runtime.
1808
1809    // Set the merge point for If node with condition of (x <= 0.0)
1810    // There are four possible paths to region node and phi node
1811    RegionNode *r = new (C) RegionNode(4);
1812    Node *phi = new (C) PhiNode(r, Type::DOUBLE);
1813
1814    // Build the first if node: if (x <= 0.0)
1815    // Node for 0 constant
1816    Node *zeronode = makecon(TypeD::ZERO);
1817    // Check x:0
1818    Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode));
1819    // Check: If (x<=0) then go complex path
1820    Node *bol1 = _gvn.transform(new (C) BoolNode( cmp, BoolTest::le ));
1821    // Branch either way
1822    IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1823    // Fast path taken; set region slot 3
1824    Node *fast_taken = _gvn.transform(new (C) IfFalseNode(if1));
1825    r->init_req(3,fast_taken); // Capture fast-control
1826
1827    // Fast path not-taken, i.e. slow path
1828    Node *complex_path = _gvn.transform(new (C) IfTrueNode(if1));
1829
1830    // Set fast path result
1831    Node *fast_result = _gvn.transform(new (C) PowDNode(C, control(), x, y));
1832    phi->init_req(3, fast_result);
1833
1834    // Complex path
1835    // Build the second if node (if y is long)
1836    // Node for (long)y
1837    Node *longy = _gvn.transform(new (C) ConvD2LNode(y));
1838    // Node for (double)((long) y)
1839    Node *doublelongy= _gvn.transform(new (C) ConvL2DNode(longy));
1840    // Check (double)((long) y) : y
1841    Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y));
1842    // If y is not integral (i.e. (double)(long)y != y), go to the slow path
1843
1844    Node *bol2 = _gvn.transform(new (C) BoolNode( cmplongy, BoolTest::ne ));
1845    // Branch either way
1846    IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1847    Node* ylong_path = _gvn.transform(new (C) IfFalseNode(if2));
1848
1849    Node *slow_path = _gvn.transform(new (C) IfTrueNode(if2));
1850
1851    // Compute (1 & (long)y) to select the sign of DPow(abs(x), y)
1852    // Node for constant 1
1853    Node *conone = longcon(1);
1854    // 1& (long)y
1855    Node *signnode= _gvn.transform(new (C) AndLNode(conone, longy));
1856
1857    // A huge number is always even. Detect a huge number by checking
1858    // whether y + 1 == y and, if so, setting the integer tested for parity to 0.
1859    // Required for corner case:
1860    // (long)9.223372036854776E18 = max_jlong
1861    // (double)(long)9.223372036854776E18 = 9.223372036854776E18
1862    // max_jlong is odd but 9.223372036854776E18 is even
1863    Node* yplus1 = _gvn.transform(new (C) AddDNode(y, makecon(TypeD::make(1))));
1864    Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y));
1865    Node *bolyplus1 = _gvn.transform(new (C) BoolNode( cmpyplus1, BoolTest::eq ));
1866    Node* correctedsign = NULL;
1867    if (ConditionalMoveLimit != 0) {
1868      correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG));
1869    } else {
1870      IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN);
1871      RegionNode *r = new (C) RegionNode(3);
1872      Node *phi = new (C) PhiNode(r, TypeLong::LONG);
1873      r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyplus1)));
1874      r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyplus1)));
1875      phi->init_req(1, signnode);
1876      phi->init_req(2, longcon(0));
1877      correctedsign = _gvn.transform(phi);
1878      ylong_path = _gvn.transform(r);
1879      record_for_igvn(r);
1880    }
1881
1882    // zero node
1883    Node *conzero = longcon(0);
1884    // Check (1&(long)y)==0?
1885    Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero));
1886    // If (1&(long)y) != 0, y is odd and the result is negative
1887    Node *bol3 = _gvn.transform(new (C) BoolNode( cmpeq1, BoolTest::ne ));
1888    // abs(x)
1889    Node *absx=_gvn.transform(new (C) AbsDNode(x));
1890    // abs(x)^y
1891    Node *absxpowy = _gvn.transform(new (C) PowDNode(C, control(), absx, y));
1892    // -abs(x)^y
1893    Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy));
1894    // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
1895    Node *signresult = NULL;
1896    if (ConditionalMoveLimit != 0) {
1897      signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
1898    } else {
1899      IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN);
1900      RegionNode *r = new (C) RegionNode(3);
1901      Node *phi = new (C) PhiNode(r, Type::DOUBLE);
1902      r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyeven)));
1903      r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyeven)));
1904      phi->init_req(1, absxpowy);
1905      phi->init_req(2, negabsxpowy);
1906      signresult = _gvn.transform(phi);
1907      ylong_path = _gvn.transform(r);
1908      record_for_igvn(r);
1909    }
1910    // Set complex path fast result
1911    r->init_req(2, ylong_path);
1912    phi->init_req(2, signresult);
1913
1914    static const jlong nan_bits = CONST64(0x7ff8000000000000);
1915    Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
1916    r->init_req(1,slow_path);
1917    phi->init_req(1,slow_result);
1918
1919    // Post merge
1920    set_control(_gvn.transform(r));
1921    record_for_igvn(r);
1922    result = _gvn.transform(phi);
1923  }
1924
1925  result = finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1926
1927  // control from finish_pow_exp is now input to the region node
1928  region_node->set_req(2, control());
1929  // the result from finish_pow_exp is now input to the phi node
1930  phi_node->init_req(2, result);
1931  set_control(_gvn.transform(region_node));
1932  record_for_igvn(region_node);
1933  set_result(_gvn.transform(phi_node));
1934
1935  C->set_has_split_ifs(true); // Has chance for split-if optimization
1936  return true;
1937}
1938
1939//------------------------------runtime_math-----------------------------
1940bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1941  assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1942         "must be (DD)D or (D)D type");
1943
1944  // Inputs
1945  Node* a = round_double_node(argument(0));
1946  Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
1947
1948  const TypePtr* no_memory_effects = NULL;
1949  Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1950                                 no_memory_effects,
1951                                 a, top(), b, b ? top() : NULL);
1952  Node* value = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+0));
1953#ifdef ASSERT
1954  Node* value_top = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+1));
1955  assert(value_top == top(), "second value must be top");
1956#endif
1957
1958  set_result(value);
1959  return true;
1960}
1961
1962//------------------------------inline_math_native-----------------------------
1963bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1964#define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
1965  switch (id) {
1966    // These intrinsics are not properly supported on all hardware
1967  case vmIntrinsics::_dcos:   return Matcher::has_match_rule(Op_CosD)   ? inline_trig(id) :
1968    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos),   "COS");
1969  case vmIntrinsics::_dsin:   return Matcher::has_match_rule(Op_SinD)   ? inline_trig(id) :
1970    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin),   "SIN");
1971  case vmIntrinsics::_dtan:   return Matcher::has_match_rule(Op_TanD)   ? inline_trig(id) :
1972    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan),   "TAN");
1973
1974  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD)   ? inline_math(id) :
1975    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog),   "LOG");
1976  case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
1977    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
1978
1979    // These intrinsics are supported on all hardware
1980  case vmIntrinsics::_dsqrt:  return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
1981  case vmIntrinsics::_dabs:   return Matcher::has_match_rule(Op_AbsD)   ? inline_math(id) : false;
1982
1983  case vmIntrinsics::_dexp:   return Matcher::has_match_rule(Op_ExpD)   ? inline_exp()    :
1984    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dexp),  "EXP");
1985  case vmIntrinsics::_dpow:   return Matcher::has_match_rule(Op_PowD)   ? inline_pow()    :
1986    runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow),  "POW");
1987#undef FN_PTR
1988
1989   // These intrinsics are not yet correctly implemented
1990  case vmIntrinsics::_datan2:
1991    return false;
1992
1993  default:
1994    fatal_unexpected_iid(id);
1995    return false;
1996  }
1997}
1998
1999static bool is_simple_name(Node* n) {
2000  return (n->req() == 1         // constant
2001          || (n->is_Type() && n->as_Type()->type()->singleton())
2002          || n->is_Proj()       // parameter or return value
2003          || n->is_Phi()        // local of some sort
2004          );
2005}
2006
2007//----------------------------inline_min_max-----------------------------------
2008bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
2009  set_result(generate_min_max(id, argument(0), argument(1)));
2010  return true;
2011}
2012
2013void LibraryCallKit::inline_math_mathExact(Node* math, Node *test) {
2014  Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
2015  IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2016  Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
2017  Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
2018
2019  {
2020    PreserveJVMState pjvms(this);
2021    PreserveReexecuteState preexecs(this);
2022    jvms()->set_should_reexecute(true);
2023
2024    set_control(slow_path);
2025    set_i_o(i_o());
2026
2027    uncommon_trap(Deoptimization::Reason_intrinsic,
2028                  Deoptimization::Action_none);
2029  }
2030
2031  set_control(fast_path);
2032  set_result(math);
2033}
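
// The shape built above corresponds to this Java-level sketch, e.g. for
// Math.addExact(int, int):
//
//   int r = x + y;            // the MathOp node (fast path result)
//   if (overflowed(x, y))     // the OverflowOp test
//     deoptimize();           // uncommon trap; the bytecode is reexecuted in
//                             // the interpreter, which throws ArithmeticException
//   return r;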
2034
2035template <typename OverflowOp>
2036bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
2037  typedef typename OverflowOp::MathOp MathOp;
2038
2039  MathOp* mathOp = new(C) MathOp(arg1, arg2);
2040  Node* operation = _gvn.transform( mathOp );
2041  Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
2042  inline_math_mathExact(operation, ofcheck);
2043  return true;
2044}
2045
2046bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
2047  return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
2048}
2049
2050bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
2051  return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
2052}
2053
2054bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
2055  return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
2056}
2057
2058bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
2059  return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
2060}
2061
2062bool LibraryCallKit::inline_math_negateExactI() {
2063  return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
2064}
2065
2066bool LibraryCallKit::inline_math_negateExactL() {
2067  return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
2068}
2069
2070bool LibraryCallKit::inline_math_multiplyExactI() {
2071  return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
2072}
2073
2074bool LibraryCallKit::inline_math_multiplyExactL() {
2075  return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
2076}
2077
2078Node*
2079LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
2080  // These are the candidate return values:
2081  Node* xvalue = x0;
2082  Node* yvalue = y0;
2083
2084  if (xvalue == yvalue) {
2085    return xvalue;
2086  }
2087
2088  bool want_max = (id == vmIntrinsics::_max);
2089
2090  const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
2091  const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
2092  if (txvalue == NULL || tyvalue == NULL)  return top();
2093  // This is not really necessary, but it is consistent with a
2094  // hypothetical MaxINode::Value method:
2095  int widen = MAX2(txvalue->_widen, tyvalue->_widen);
2096
2097  // %%% This folding logic should (ideally) be in a different place.
2098  // Some of it should live inside IfNode, and there should be a more reliable
2099  // transformation of ?: style patterns into cmoves.  We also want
2100  // more powerful optimizations around cmove and min/max.
2101
2102  // Try to find a dominating comparison of these guys.
2103  // It can simplify the index computation for Arrays.copyOf
2104  // and similar uses of System.arraycopy.
2105  // First, compute the normalized version of CmpI(x, y).
2106  int   cmp_op = Op_CmpI;
2107  Node* xkey = xvalue;
2108  Node* ykey = yvalue;
2109  Node* ideal_cmpxy = _gvn.transform(new(C) CmpINode(xkey, ykey));
2110  if (ideal_cmpxy->is_Cmp()) {
2111    // E.g., if we have CmpI(length - offset, count),
2112    // it might idealize to CmpI(length, count + offset)
2113    cmp_op = ideal_cmpxy->Opcode();
2114    xkey = ideal_cmpxy->in(1);
2115    ykey = ideal_cmpxy->in(2);
2116  }
2117
2118  // Start by locating any relevant comparisons.
2119  Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
2120  Node* cmpxy = NULL;
2121  Node* cmpyx = NULL;
2122  for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
2123    Node* cmp = start_from->fast_out(k);
2124    if (cmp->outcnt() > 0 &&            // must have prior uses
2125        cmp->in(0) == NULL &&           // must be context-independent
2126        cmp->Opcode() == cmp_op) {      // right kind of compare
2127      if (cmp->in(1) == xkey && cmp->in(2) == ykey)  cmpxy = cmp;
2128      if (cmp->in(1) == ykey && cmp->in(2) == xkey)  cmpyx = cmp;
2129    }
2130  }
2131
2132  const int NCMPS = 2;
2133  Node* cmps[NCMPS] = { cmpxy, cmpyx };
2134  int cmpn;
2135  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2136    if (cmps[cmpn] != NULL)  break;     // find a result
2137  }
2138  if (cmpn < NCMPS) {
2139    // Look for a dominating test that tells us the min and max.
2140    int depth = 0;                // Limit search depth for speed
2141    Node* dom = control();
2142    for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
2143      if (++depth >= 100)  break;
2144      Node* ifproj = dom;
2145      if (!ifproj->is_Proj())  continue;
2146      Node* iff = ifproj->in(0);
2147      if (!iff->is_If())  continue;
2148      Node* bol = iff->in(1);
2149      if (!bol->is_Bool())  continue;
2150      Node* cmp = bol->in(1);
2151      if (cmp == NULL)  continue;
2152      for (cmpn = 0; cmpn < NCMPS; cmpn++)
2153        if (cmps[cmpn] == cmp)  break;
2154      if (cmpn == NCMPS)  continue;
2155      BoolTest::mask btest = bol->as_Bool()->_test._test;
2156      if (ifproj->is_IfFalse())  btest = BoolTest(btest).negate();
2157      if (cmp->in(1) == ykey)    btest = BoolTest(btest).commute();
2158      // At this point, we know that 'x btest y' is true.
2159      switch (btest) {
2160      case BoolTest::eq:
2161        // They are proven equal, so we can collapse the min/max.
2162        // Either value is the answer.  Choose the simpler.
2163        if (is_simple_name(yvalue) && !is_simple_name(xvalue))
2164          return yvalue;
2165        return xvalue;
2166      case BoolTest::lt:          // x < y
2167      case BoolTest::le:          // x <= y
2168        return (want_max ? yvalue : xvalue);
2169      case BoolTest::gt:          // x > y
2170      case BoolTest::ge:          // x >= y
2171        return (want_max ? xvalue : yvalue);
2172      }
2173    }
2174  }
2175
2176  // We failed to find a dominating test.
2177  // Let's pick a test that might GVN with prior tests.
2178  Node*          best_bol   = NULL;
2179  BoolTest::mask best_btest = BoolTest::illegal;
2180  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
2181    Node* cmp = cmps[cmpn];
2182    if (cmp == NULL)  continue;
2183    for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
2184      Node* bol = cmp->fast_out(j);
2185      if (!bol->is_Bool())  continue;
2186      BoolTest::mask btest = bol->as_Bool()->_test._test;
2187      if (btest == BoolTest::eq || btest == BoolTest::ne)  continue;
2188      if (cmp->in(1) == ykey)   btest = BoolTest(btest).commute();
2189      if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
2190        best_bol   = bol->as_Bool();
2191        best_btest = btest;
2192      }
2193    }
2194  }
2195
2196  Node* answer_if_true  = NULL;
2197  Node* answer_if_false = NULL;
2198  switch (best_btest) {
2199  default:
2200    if (cmpxy == NULL)
2201      cmpxy = ideal_cmpxy;
2202    best_bol = _gvn.transform(new(C) BoolNode(cmpxy, BoolTest::lt));
2203    // and fall through:
2204  case BoolTest::lt:          // x < y
2205  case BoolTest::le:          // x <= y
2206    answer_if_true  = (want_max ? yvalue : xvalue);
2207    answer_if_false = (want_max ? xvalue : yvalue);
2208    break;
2209  case BoolTest::gt:          // x > y
2210  case BoolTest::ge:          // x >= y
2211    answer_if_true  = (want_max ? xvalue : yvalue);
2212    answer_if_false = (want_max ? yvalue : xvalue);
2213    break;
2214  }
2215
2216  jint hi, lo;
2217  if (want_max) {
2218    // We can sharpen the minimum.
2219    hi = MAX2(txvalue->_hi, tyvalue->_hi);
2220    lo = MAX2(txvalue->_lo, tyvalue->_lo);
2221  } else {
2222    // We can sharpen the maximum.
2223    hi = MIN2(txvalue->_hi, tyvalue->_hi);
2224    lo = MIN2(txvalue->_lo, tyvalue->_lo);
2225  }
2226
2227  // Use a flow-free graph structure, to avoid creating excess control edges
2228  // which could hinder other optimizations.
2229  // Since Math.min/max is often used with arraycopy, we want
2230  // tightly_coupled_allocation to be able to see beyond min/max expressions.
2231  Node* cmov = CMoveNode::make(C, NULL, best_bol,
2232                               answer_if_false, answer_if_true,
2233                               TypeInt::make(lo, hi, widen));
2234
2235  return _gvn.transform(cmov);
2236
2237  /*
2238  // This is not as desirable as it may seem, since Min and Max
2239  // nodes do not have a full set of optimizations.
2240  // And they would interfere, anyway, with 'if' optimizations
2241  // and with CMoveI canonical forms.
2242  switch (id) {
2243  case vmIntrinsics::_min:
2244    result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
2245  case vmIntrinsics::_max:
2246    result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
2247  default:
2248    ShouldNotReachHere();
2249  }
2250  */
2251}
2252
2253inline int
2254LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
2255  const TypePtr* base_type = TypePtr::NULL_PTR;
2256  if (base != NULL)  base_type = _gvn.type(base)->isa_ptr();
2257  if (base_type == NULL) {
2258    // Unknown type.
2259    return Type::AnyPtr;
2260  } else if (base_type == TypePtr::NULL_PTR) {
2261    // Since this is a NULL+long form, we have to switch to a rawptr.
2262    base   = _gvn.transform(new (C) CastX2PNode(offset));
2263    offset = MakeConX(0);
2264    return Type::RawPtr;
2265  } else if (base_type->base() == Type::RawPtr) {
2266    return Type::RawPtr;
2267  } else if (base_type->isa_oopptr()) {
2268    // Base is never null => always a heap address.
2269    if (base_type->ptr() == TypePtr::NotNull) {
2270      return Type::OopPtr;
2271    }
2272    // Offset is small => always a heap address.
2273    const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
2274    if (offset_type != NULL &&
2275        base_type->offset() == 0 &&     // (should always be?)
2276        offset_type->_lo >= 0 &&
2277        !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
2278      return Type::OopPtr;
2279    }
2280    // Otherwise, it might either be oop+off or NULL+addr.
2281    return Type::AnyPtr;
2282  } else {
2283    // No information:
2284    return Type::AnyPtr;
2285  }
2286}
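
// Classification examples (a sketch):
//   base == NULL, offset holds a raw address   -> RawPtr (the NULL+long form)
//   base a non-null oop, or oop + small offset -> OopPtr (heap address)
//   base of unknown type, or an oop+offset
//   that might really be NULL+address          -> AnyPtr (stay conservative)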
2287
2288inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
2289  int kind = classify_unsafe_addr(base, offset);
2290  if (kind == Type::RawPtr) {
2291    return basic_plus_adr(top(), base, offset);
2292  } else {
2293    return basic_plus_adr(base, offset);
2294  }
2295}
2296
2297//--------------------------inline_number_methods-----------------------------
2298// inline int     Integer.numberOfLeadingZeros(int)
2299// inline int        Long.numberOfLeadingZeros(long)
2300//
2301// inline int     Integer.numberOfTrailingZeros(int)
2302// inline int        Long.numberOfTrailingZeros(long)
2303//
2304// inline int     Integer.bitCount(int)
2305// inline int        Long.bitCount(long)
2306//
2307// inline char  Character.reverseBytes(char)
2308// inline short     Short.reverseBytes(short)
2309// inline int     Integer.reverseBytes(int)
2310// inline long       Long.reverseBytes(long)
2311bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
2312  Node* arg = argument(0);
2313  Node* n;
2314  switch (id) {
2315  case vmIntrinsics::_numberOfLeadingZeros_i:   n = new (C) CountLeadingZerosINode( arg);  break;
2316  case vmIntrinsics::_numberOfLeadingZeros_l:   n = new (C) CountLeadingZerosLNode( arg);  break;
2317  case vmIntrinsics::_numberOfTrailingZeros_i:  n = new (C) CountTrailingZerosINode(arg);  break;
2318  case vmIntrinsics::_numberOfTrailingZeros_l:  n = new (C) CountTrailingZerosLNode(arg);  break;
2319  case vmIntrinsics::_bitCount_i:               n = new (C) PopCountINode(          arg);  break;
2320  case vmIntrinsics::_bitCount_l:               n = new (C) PopCountLNode(          arg);  break;
2321  case vmIntrinsics::_reverseBytes_c:           n = new (C) ReverseBytesUSNode(0,   arg);  break;
2322  case vmIntrinsics::_reverseBytes_s:           n = new (C) ReverseBytesSNode( 0,   arg);  break;
2323  case vmIntrinsics::_reverseBytes_i:           n = new (C) ReverseBytesINode( 0,   arg);  break;
2324  case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
2325  default:  fatal_unexpected_iid(id);  break;
2326  }
2327  set_result(_gvn.transform(n));
2328  return true;
2329}
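
// For example, Integer.bitCount(x) becomes the single PopCountINode created
// above; on a platform with a match rule for it (e.g. x86 with POPCNT
// available, an assumed platform detail) this matches to one instruction.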
2330
2331//----------------------------inline_unsafe_access----------------------------
2332
2333const static BasicType T_ADDRESS_HOLDER = T_LONG;
2334
2335// Helper that guards and inserts a pre-barrier.
2336void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
2337                                        Node* pre_val, bool need_mem_bar) {
2338  // We could be accessing the referent field of a reference object. If so, when G1
2339  // is enabled, we need to log the value in the referent field in an SATB buffer.
2340  // This routine performs some compile time filters and generates suitable
2341  // runtime filters that guard the pre-barrier code.
2342  // Also add memory barrier for non volatile load from the referent field
2343  // to prevent commoning of loads across safepoint.
2344  if (!UseG1GC && !need_mem_bar)
2345    return;
2346
2347  // Some compile time checks.
2348
2349  // If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
2350  const TypeX* otype = offset->find_intptr_t_type();
2351  if (otype != NULL && otype->is_con() &&
2352      otype->get_con() != java_lang_ref_Reference::referent_offset) {
2353    // Constant offset but not the reference_offset so just return
2354    return;
2355  }
2356
2357  // We only need to generate the runtime guards for instances.
2358  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
2359  if (btype != NULL) {
2360    if (btype->isa_aryptr()) {
2361      // Array type so nothing to do
2362      return;
2363    }
2364
2365    const TypeInstPtr* itype = btype->isa_instptr();
2366    if (itype != NULL) {
2367      // Can the klass of base_oop be statically determined to be
2368      // _not_ a sub-class of Reference and _not_ Object?
2369      ciKlass* klass = itype->klass();
2370      if ( klass->is_loaded() &&
2371          !klass->is_subtype_of(env()->Reference_klass()) &&
2372          !env()->Object_klass()->is_subtype_of(klass)) {
2373        return;
2374      }
2375    }
2376  }
2377
2378  // The compile time filters did not reject base_oop/offset so
2379  // we need to generate the following runtime filters
2380  //
2381  // if (offset == java_lang_ref_Reference::_reference_offset) {
2382  //   if (instance_of(base, java.lang.ref.Reference)) {
2383  //     pre_barrier(_, pre_val, ...);
2384  //   }
2385  // }
2386
2387  float likely   = PROB_LIKELY(  0.999);
2388  float unlikely = PROB_UNLIKELY(0.999);
2389
2390  IdealKit ideal(this);
2391#define __ ideal.
2392
2393  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);
2394
2395  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
2396      // Update graphKit memory and control from IdealKit.
2397      sync_kit(ideal);
2398
2399      Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
2400      Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
2401
2402      // Update IdealKit memory and control from graphKit.
2403      __ sync_kit(this);
2404
2405      Node* one = __ ConI(1);
2406      // is_instof == 0 if base_oop == NULL
2407      __ if_then(is_instof, BoolTest::eq, one, unlikely); {
2408
2409        // Update graphKit from IdealKit.
2410        sync_kit(ideal);
2411
2412        // Use the pre-barrier to record the value in the referent field
2413        pre_barrier(false /* do_load */,
2414                    __ ctrl(),
2415                    NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
2416                    pre_val /* pre_val */,
2417                    T_OBJECT);
2418        if (need_mem_bar) {
2419          // Add memory barrier to prevent commoning reads from this field
2420          // across safepoint since GC can change its value.
2421          insert_mem_bar(Op_MemBarCPUOrder);
2422        }
2423        // Update IdealKit from graphKit.
2424        __ sync_kit(this);
2425
2426      } __ end_if(); // _ref_type != ref_none
2427  } __ end_if(); // offset == referent_offset
2428
2429  // Final sync IdealKit and GraphKit.
2430  final_sync(ideal);
2431#undef __
2432}
2433
2434
2435// Interpret Unsafe.fieldOffset cookies correctly:
2436extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
2437
2438const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
2439  // Attempt to infer a sharper value type from the offset and base type.
2440  ciKlass* sharpened_klass = NULL;
2441
2442  // See if it is an instance field, with an object type.
2443  if (alias_type->field() != NULL) {
2444    assert(!is_native_ptr, "native pointer op cannot use a java address");
2445    if (alias_type->field()->type()->is_klass()) {
2446      sharpened_klass = alias_type->field()->type()->as_klass();
2447    }
2448  }
2449
2450  // See if it is a narrow oop array.
2451  if (adr_type->isa_aryptr()) {
2452    if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
2453      const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2454      if (elem_type != NULL) {
2455        sharpened_klass = elem_type->klass();
2456      }
2457    }
2458  }
2459
2460  // The sharpened class might be unloaded if there is no class loader
2461  // constraint in place.
2462  if (sharpened_klass != NULL && sharpened_klass->is_loaded()) {
2463    const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2464
2465#ifndef PRODUCT
2466    if (C->print_intrinsics() || C->print_inlining()) {
2467      tty->print("  from base type: ");  adr_type->dump();
2468      tty->print("  sharpened value: ");  tjp->dump();
2469    }
2470#endif
2471    // Sharpen the value type.
2472    return tjp;
2473  }
2474  return NULL;
2475}
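
// Sharpening example (a sketch): for an unsafe load from a String[] element,
// the generic T_OBJECT value type is sharpened to java/lang/String, giving
// downstream optimizations a more precise oop type to work with.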
2476
2477bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
2478  if (callee()->is_static())  return false;  // caller must have the capability!
2479
2480#ifndef PRODUCT
2481  {
2482    ResourceMark rm;
2483    // Check the signatures.
2484    ciSignature* sig = callee()->signature();
2485#ifdef ASSERT
2486    if (!is_store) {
2487      // Object getObject(Object base, int/long offset), etc.
2488      BasicType rtype = sig->return_type()->basic_type();
2489      if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
2490          rtype = T_ADDRESS;  // it is really a C void*
2491      assert(rtype == type, "getter must return the expected value");
2492      if (!is_native_ptr) {
2493        assert(sig->count() == 2, "oop getter has 2 arguments");
2494        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
2495        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
2496      } else {
2497        assert(sig->count() == 1, "native getter has 1 argument");
2498        assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
2499      }
2500    } else {
2501      // void putObject(Object base, int/long offset, Object x), etc.
2502      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
2503      if (!is_native_ptr) {
2504        assert(sig->count() == 3, "oop putter has 3 arguments");
2505        assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
2506        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
2507      } else {
2508        assert(sig->count() == 2, "native putter has 2 arguments");
2509        assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
2510      }
2511      BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
2512      if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
2513        vtype = T_ADDRESS;  // it is really a C void*
2514      assert(vtype == type, "putter must accept the expected value");
2515    }
2516#endif // ASSERT
2517  }
2518#endif //PRODUCT
2519
2520  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2521
2522  Node* receiver = argument(0);  // type: oop
2523
2524  // Build address expression.  See the code in inline_unsafe_prefetch.
2525  Node* adr;
2526  Node* heap_base_oop = top();
2527  Node* offset = top();
2528  Node* val;
2529
2530  if (!is_native_ptr) {
2531    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2532    Node* base = argument(1);  // type: oop
2533    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2534    offset = argument(2);  // type: long
2535    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2536    // to be plain byte offsets, which are also the same as those accepted
2537    // by oopDesc::field_base.
2538    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2539           "fieldOffset must be byte-scaled");
2540    // 32-bit machines ignore the high half!
2541    offset = ConvL2X(offset);
2542    adr = make_unsafe_address(base, offset);
2543    heap_base_oop = base;
2544    val = is_store ? argument(4) : NULL;
2545  } else {
2546    Node* ptr = argument(1);  // type: long
2547    ptr = ConvL2X(ptr);  // adjust Java long to machine word
2548    adr = make_unsafe_address(NULL, ptr);
2549    val = is_store ? argument(3) : NULL;
2550  }
2551
2552  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2553
2554  // First guess at the value type.
2555  const Type *value_type = Type::get_const_basic_type(type);
2556
2557  // Try to categorize the address.  If it comes up as TypeOopPtr::BOTTOM,
2558  // there was not enough information to nail it down.
2559  Compile::AliasType* alias_type = C->alias_type(adr_type);
2560  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2561
2562  // We will need memory barriers unless we can determine a unique
2563  // alias category for this reference.  (Note:  If for some reason
2564  // the barriers get omitted and the unsafe reference begins to "pollute"
2565  // the alias analysis of the rest of the graph, either Compile::can_alias
2566  // or Compile::must_alias will throw a diagnostic assert.)
2567  bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2568
2569  // If we are reading the value of the referent field of a Reference
2570  // object (either by using Unsafe directly or through reflection)
2571  // then, if G1 is enabled, we need to record the referent in an
2572  // SATB log buffer using the pre-barrier mechanism.
2573  // Also we need to add a memory barrier to prevent commoning reads
2574  // from this field across a safepoint, since the GC can change its value.
2575  bool need_read_barrier = !is_native_ptr && !is_store &&
2576                           offset != top() && heap_base_oop != top();
2577
2578  if (!is_store && type == T_OBJECT) {
2579    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
2580    if (tjp != NULL) {
2581      value_type = tjp;
2582    }
2583  }
2584
2585  receiver = null_check(receiver);
2586  if (stopped()) {
2587    return true;
2588  }
2589  // Heap pointers get a null-check from the interpreter,
2590  // as a courtesy.  However, this is not guaranteed by Unsafe,
2591  // and it is not possible to fully distinguish unintended nulls
2592  // from intended ones in this API.
2593
2594  if (is_volatile) {
2595    // We need to emit leading and trailing CPU membars (see below) in
2596    // addition to memory membars when is_volatile. This is a little
2597    // too strong, but avoids the need to insert per-alias-type
2598    // volatile membars (for stores; compare Parse::do_put_xxx), which
2599    // we cannot do effectively here because we probably only have a
2600    // rough approximation of the type.
2601    need_mem_bar = true;
2602    // For Stores, place a memory ordering barrier now.
2603    if (is_store) {
2604      insert_mem_bar(Op_MemBarRelease);
2605    } else {
2606      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2607        insert_mem_bar(Op_MemBarVolatile);
2608      }
2609    }
2610  }
2611
2612  // Memory barrier to prevent normal and 'unsafe' accesses from
2613  // bypassing each other.  Happens after null checks, so the
2614  // exception paths do not take memory state from the memory barrier,
2615  // so there's no problem making a strong assert about mixing users
2616  // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2617  // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2618  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2619
2620  if (!is_store) {
2621    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
2622    // load value
2623    switch (type) {
2624    case T_BOOLEAN:
2625    case T_CHAR:
2626    case T_BYTE:
2627    case T_SHORT:
2628    case T_INT:
2629    case T_LONG:
2630    case T_FLOAT:
2631    case T_DOUBLE:
2632      break;
2633    case T_OBJECT:
2634      if (need_read_barrier) {
2635        insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
2636      }
2637      break;
2638    case T_ADDRESS:
2639      // Cast to an int type.
2640      p = _gvn.transform(new (C) CastP2XNode(NULL, p));
2641      p = ConvX2UL(p);
2642      break;
2643    default:
2644      fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2645      break;
2646    }
2647    // The load node has the control of the preceding MemBarCPUOrder.  All
2648    // following nodes will have the control of the MemBarCPUOrder inserted at
2649    // the end of this method.  So, pushing the load onto the stack at a later
2650    // point is fine.
2651    set_result(p);
2652  } else {
2653    // place effect of store into memory
2654    switch (type) {
2655    case T_DOUBLE:
2656      val = dstore_rounding(val);
2657      break;
2658    case T_ADDRESS:
2659      // Repackage the long as a pointer.
2660      val = ConvL2X(val);
2661      val = _gvn.transform(new (C) CastX2PNode(val));
2662      break;
2663    }
2664
2665    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
2666    if (type != T_OBJECT) {
2667      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
2668    } else {
2669      // Possibly an oop being stored to Java heap or native memory
2670      if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2671        // oop to Java heap.
2672        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2673      } else {
2674        // We can't tell at compile time if we are storing in the Java heap or outside
2675        // of it. So we need to emit code to conditionally do the proper type of
2676        // store.
2677
2678        IdealKit ideal(this);
2679#define __ ideal.
2680        // QQQ who knows what probability is here??
2681        __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2682          // Sync IdealKit and graphKit.
2683          sync_kit(ideal);
2684          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
2685          // Update IdealKit memory.
2686          __ sync_kit(this);
2687        } __ else_(); {
2688          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
2689        } __ end_if();
2690        // Final sync IdealKit and GraphKit.
2691        final_sync(ideal);
2692#undef __
2693      }
2694    }
2695  }
2696
2697  if (is_volatile) {
2698    if (!is_store) {
2699      insert_mem_bar(Op_MemBarAcquire);
2700    } else {
2701      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2702        insert_mem_bar(Op_MemBarVolatile);
2703      }
2704    }
2705  }
2706
2707  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2708
2709  return true;
2710}
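
// Roughly, the membar pattern emitted above for volatile Unsafe accesses is:
//
//   volatile get:  [MemBarVolatile iff IRIW]  [CPUOrder]  load
//                  MemBarAcquire  [CPUOrder]
//   volatile put:  MemBarRelease  [CPUOrder]  store
//                  [MemBarVolatile iff !IRIW]  [CPUOrder]
//
// where the bracketed CPUOrder membars appear only when need_mem_bar is set.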
2711
2712//----------------------------inline_unsafe_prefetch----------------------------
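// This intrinsic serves the sun.misc.Unsafe prefetch entry points, e.g.:
//
//   public native void prefetchRead( Object o, long offset);
//   public native void prefetchWrite(Object o, long offset);
//
// and their static prefetchReadStatic/prefetchWriteStatic variants
// (is_static picks the argument base index below).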
2713
2714bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2715#ifndef PRODUCT
2716  {
2717    ResourceMark rm;
2718    // Check the signatures.
2719    ciSignature* sig = callee()->signature();
2720#ifdef ASSERT
2721    // void prefetchRead(Object base, long offset), etc.
2722    BasicType rtype = sig->return_type()->basic_type();
2723    if (!is_native_ptr) {
2724      assert(sig->count() == 2, "oop prefetch has 2 arguments");
2725      assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object");
2726      assert(sig->type_at(1)->basic_type() == T_LONG, "prefetch offset is correct");
2727    } else {
2728      assert(sig->count() == 1, "native prefetch has 1 argument");
2729      assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long");
2730    }
2731#endif // ASSERT
2732  }
2733#endif // !PRODUCT
2734
2735  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2736
2737  const int idx = is_static ? 0 : 1;
2738  if (!is_static) {
2739    null_check_receiver();
2740    if (stopped()) {
2741      return true;
2742    }
2743  }
2744
2745  // Build address expression.  See the code in inline_unsafe_access.
2746  Node *adr;
2747  if (!is_native_ptr) {
2748    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2749    Node* base   = argument(idx + 0);  // type: oop
2750    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2751    Node* offset = argument(idx + 1);  // type: long
2752    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2753    // to be plain byte offsets, which are also the same as those accepted
2754    // by oopDesc::field_base.
2755    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2756           "fieldOffset must be byte-scaled");
2757    // 32-bit machines ignore the high half!
2758    offset = ConvL2X(offset);
2759    adr = make_unsafe_address(base, offset);
2760  } else {
2761    Node* ptr = argument(idx + 0);  // type: long
2762    ptr = ConvL2X(ptr);  // adjust Java long to machine word
2763    adr = make_unsafe_address(NULL, ptr);
2764  }
2765
2766  // Generate the read or write prefetch
2767  Node *prefetch;
2768  if (is_store) {
2769    prefetch = new (C) PrefetchWriteNode(i_o(), adr);
2770  } else {
2771    prefetch = new (C) PrefetchReadNode(i_o(), adr);
2772  }
2773  prefetch->init_req(0, control());
2774  set_i_o(_gvn.transform(prefetch));
2775
2776  return true;
2777}
2778
2779//----------------------------inline_unsafe_load_store----------------------------
2780// This method serves a couple of different customers (depending on LoadStoreKind):
2781//
2782// LS_cmpxchg:
2783//   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
2784//   public final native boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
2785//   public final native boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
2786//
2787// LS_xadd:
2788//   public int  getAndAddInt( Object o, long offset, int  delta)
2789//   public long getAndAddLong(Object o, long offset, long delta)
2790//
2791// LS_xchg:
2792//   int    getAndSet(Object o, long offset, int    newValue)
2793//   long   getAndSet(Object o, long offset, long   newValue)
2794//   Object getAndSet(Object o, long offset, Object newValue)
2795//
2796bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
2797  // This basic scheme here is the same as inline_unsafe_access, but
2798  // differs in enough details that combining them would make the code
2799  // overly confusing.  (This is a true fact! I originally combined
2800  // them, but even I was confused by it!) As much code/comments as
2801  // possible are retained from inline_unsafe_access though to make
2802  // the correspondences clearer. - dl
2803
2804  if (callee()->is_static())  return false;  // caller must have the capability!
2805
2806#ifndef PRODUCT
2807  BasicType rtype;
2808  {
2809    ResourceMark rm;
2810    // Check the signatures.
2811    ciSignature* sig = callee()->signature();
2812    rtype = sig->return_type()->basic_type();
2813    if (kind == LS_xadd || kind == LS_xchg) {
2814      // Check the signatures.
2815#ifdef ASSERT
2816      assert(rtype == type, "get and set must return the expected type");
2817      assert(sig->count() == 3, "get and set has 3 arguments");
2818      assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
2819      assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
2820      assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
2821#endif // ASSERT
2822    } else if (kind == LS_cmpxchg) {
2823      // Check the signatures.
2824#ifdef ASSERT
2825      assert(rtype == T_BOOLEAN, "CAS must return boolean");
2826      assert(sig->count() == 4, "CAS has 4 arguments");
2827      assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2828      assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2829#endif // ASSERT
2830    } else {
2831      ShouldNotReachHere();
2832    }
2833  }
2834#endif //PRODUCT
2835
2836  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2837
2838  // Get arguments:
2839  Node* receiver = NULL;
2840  Node* base     = NULL;
2841  Node* offset   = NULL;
2842  Node* oldval   = NULL;
2843  Node* newval   = NULL;
2844  if (kind == LS_cmpxchg) {
2845    const bool two_slot_type = type2size[type] == 2;
2846    receiver = argument(0);  // type: oop
2847    base     = argument(1);  // type: oop
2848    offset   = argument(2);  // type: long
2849    oldval   = argument(4);  // type: oop, int, or long
2850    newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
2851  } else if (kind == LS_xadd || kind == LS_xchg){
2852    receiver = argument(0);  // type: oop
2853    base     = argument(1);  // type: oop
2854    offset   = argument(2);  // type: long
2855    oldval   = NULL;
2856    newval   = argument(4);  // type: oop, int, or long
2857  }
2858
2859  // Null check receiver.
2860  receiver = null_check(receiver);
2861  if (stopped()) {
2862    return true;
2863  }
2864
2865  // Build field offset expression.
2866  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2867  // to be plain byte offsets, which are also the same as those accepted
2868  // by oopDesc::field_base.
2869  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2870  // 32-bit machines ignore the high half of long offsets
2871  offset = ConvL2X(offset);
2872  Node* adr = make_unsafe_address(base, offset);
2873  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2874
2875  // For CAS, unlike inline_unsafe_access, there seems no point in
2876  // trying to refine types. Just use the coarse types here.
2877  const Type *value_type = Type::get_const_basic_type(type);
2878  Compile::AliasType* alias_type = C->alias_type(adr_type);
2879  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2880
2881  if (kind == LS_xchg && type == T_OBJECT) {
2882    const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
2883    if (tjp != NULL) {
2884      value_type = tjp;
2885    }
2886  }
2887
2888  int alias_idx = C->get_alias_index(adr_type);
2889
2890  // Memory-model-wise, a LoadStore acts like a little synchronized
2891  // block, so needs barriers on each side.  These don't translate
2892  // into actual barriers on most machines, but we still need the rest
2893  // of the compiler to respect the ordering.
2894
2895  insert_mem_bar(Op_MemBarRelease);
2896  insert_mem_bar(Op_MemBarCPUOrder);
2897
2898  // 4984716: MemBars must be inserted before this
2899  //          memory node in order to avoid a false
2900  //          dependency which will confuse the scheduler.
2901  Node *mem = memory(alias_idx);
2902
2903  // For now, we handle only those cases that actually exist: ints,
2904  // longs, and Object. Adding others should be straightforward.
2905  Node* load_store;
2906  switch(type) {
2907  case T_INT:
2908    if (kind == LS_xadd) {
2909      load_store = _gvn.transform(new (C) GetAndAddINode(control(), mem, adr, newval, adr_type));
2910    } else if (kind == LS_xchg) {
2911      load_store = _gvn.transform(new (C) GetAndSetINode(control(), mem, adr, newval, adr_type));
2912    } else if (kind == LS_cmpxchg) {
2913      load_store = _gvn.transform(new (C) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2914    } else {
2915      ShouldNotReachHere();
2916    }
2917    break;
2918  case T_LONG:
2919    if (kind == LS_xadd) {
2920      load_store = _gvn.transform(new (C) GetAndAddLNode(control(), mem, adr, newval, adr_type));
2921    } else if (kind == LS_xchg) {
2922      load_store = _gvn.transform(new (C) GetAndSetLNode(control(), mem, adr, newval, adr_type));
2923    } else if (kind == LS_cmpxchg) {
2924      load_store = _gvn.transform(new (C) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2925    } else {
2926      ShouldNotReachHere();
2927    }
2928    break;
2929  case T_OBJECT:
2930    // Transformation of a value which could be NULL pointer (CastPP #NULL)
2931    // could be delayed during Parse (for example, in adjust_map_after_if()).
2932    // Execute transformation here to avoid barrier generation in such case.
2933    if (_gvn.type(newval) == TypePtr::NULL_PTR)
2934      newval = _gvn.makecon(TypePtr::NULL_PTR);
2935
2936    // Reference stores need a store barrier.
2937    if (kind == LS_xchg) {
2938      // If pre-barrier must execute before the oop store, old value will require do_load here.
2939      if (!can_move_pre_barrier()) {
2940        pre_barrier(true /* do_load*/,
2941                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
2942                    NULL /* pre_val*/,
2943                    T_OBJECT);
2944      } // Else move pre_barrier to use load_store value, see below.
2945    } else if (kind == LS_cmpxchg) {
2946      // Same as for newval above:
2947      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
2948        oldval = _gvn.makecon(TypePtr::NULL_PTR);
2949      }
2950      // The only known value which might get overwritten is oldval.
2951      pre_barrier(false /* do_load */,
2952                  control(), NULL, NULL, max_juint, NULL, NULL,
2953                  oldval /* pre_val */,
2954                  T_OBJECT);
2955    } else {
2956      ShouldNotReachHere();
2957    }
2958
2959#ifdef _LP64
2960    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2961      Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2962      if (kind == LS_xchg) {
2963        load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
2964                                                           newval_enc, adr_type, value_type->make_narrowoop()));
2965      } else {
2966        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2967        Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2968        load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
2969                                                                newval_enc, oldval_enc));
2970      }
2971    } else
2972#endif
2973    {
2974      if (kind == LS_xchg) {
2975        load_store = _gvn.transform(new (C) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
2976      } else {
2977        assert(kind == LS_cmpxchg, "wrong LoadStore operation");
2978        load_store = _gvn.transform(new (C) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2979      }
2980    }
2981    post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
2982    break;
2983  default:
2984    fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
2985    break;
2986  }
2987
2988  // SCMemProjNodes represent the memory state of a LoadStore. Their
2989  // main role is to prevent LoadStore nodes from being optimized away
2990  // when their results aren't used.
2991  Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
2992  set_memory(proj, alias_idx);
2993
2994  if (type == T_OBJECT && kind == LS_xchg) {
2995#ifdef _LP64
2996    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2997      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
2998    }
2999#endif
3000    if (can_move_pre_barrier()) {
3001      // Don't need to load pre_val. The old value is returned by load_store.
3002      // The pre_barrier can execute after the xchg as long as no safepoint
3003      // gets inserted between them.
3004      pre_barrier(false /* do_load */,
3005                  control(), NULL, NULL, max_juint, NULL, NULL,
3006                  load_store /* pre_val */,
3007                  T_OBJECT);
3008    }
3009  }
3010
3011  // Add the trailing membar surrounding the access
3012  insert_mem_bar(Op_MemBarCPUOrder);
3013  insert_mem_bar(Op_MemBarAcquire);
3014
3015  assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
3016  set_result(load_store);
3017  return true;
3018}
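
// The overall shape produced above, for every LoadStoreKind, is:
//
//   MemBarRelease
//   MemBarCPUOrder
//   LoadStore node (CompareAndSwapX / GetAndSetX / GetAndAddX)
//     SCMemProj      (memory state; also keeps the LoadStore alive)
//   MemBarCPUOrder
//   MemBarAcquire
//
// i.e. the access behaves like a tiny synchronized block, as noted above.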
3019
3020//----------------------------inline_unsafe_ordered_store----------------------
3021// public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
3022// public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
3023// public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
3024bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
3025  // This is another variant of inline_unsafe_access, differing in
3026  // that it always issues store-store ("release") barrier and ensures
3027  // store-atomicity (which only matters for "long").
3028
3029  if (callee()->is_static())  return false;  // caller must have the capability!
3030
3031#ifndef PRODUCT
3032  {
3033    ResourceMark rm;
3034    // Check the signatures.
3035    ciSignature* sig = callee()->signature();
3036#ifdef ASSERT
3037    BasicType rtype = sig->return_type()->basic_type();
3038    assert(rtype == T_VOID, "must return void");
3039    assert(sig->count() == 3, "has 3 arguments");
3040    assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
3041    assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
3042#endif // ASSERT
3043  }
3044#endif //PRODUCT
3045
3046  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3047
3048  // Get arguments:
3049  Node* receiver = argument(0);  // type: oop
3050  Node* base     = argument(1);  // type: oop
3051  Node* offset   = argument(2);  // type: long
3052  Node* val      = argument(4);  // type: oop, int, or long
3053
3054  // Null check receiver.
3055  receiver = null_check(receiver);
3056  if (stopped()) {
3057    return true;
3058  }
3059
3060  // Build field offset expression.
3061  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
3062  // 32-bit machines ignore the high half of long offsets
3063  offset = ConvL2X(offset);
3064  Node* adr = make_unsafe_address(base, offset);
3065  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
3066  const Type *value_type = Type::get_const_basic_type(type);
3067  Compile::AliasType* alias_type = C->alias_type(adr_type);
3068
3069  insert_mem_bar(Op_MemBarRelease);
3070  insert_mem_bar(Op_MemBarCPUOrder);
3071  // Ensure that the store is atomic for longs:
3072  const bool require_atomic_access = true;
3073  Node* store;
3074  if (type == T_OBJECT) {  // reference stores need a store barrier.
3075    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
3076  } else {
3077    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
3078  }
3079  insert_mem_bar(Op_MemBarCPUOrder);
3080  return true;
3081}
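
// This is the intrinsic behind AtomicInteger.lazySet and friends;
// schematically (field name illustrative):
//
//   public final void lazySet(int newValue) {
//     unsafe.putOrderedInt(this, valueOffset, newValue);
//   }
//
// Unlike a volatile store there is no trailing MemBarVolatile, so the store
// is ordered after earlier stores but need not become visible promptly.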
3082
3083bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
3084  // Regardless of form, don't allow previous ld/st to move down,
3085  // then issue acquire, release, or volatile mem_bar.
3086  insert_mem_bar(Op_MemBarCPUOrder);
3087  switch(id) {
3088    case vmIntrinsics::_loadFence:
3089      insert_mem_bar(Op_LoadFence);
3090      return true;
3091    case vmIntrinsics::_storeFence:
3092      insert_mem_bar(Op_StoreFence);
3093      return true;
3094    case vmIntrinsics::_fullFence:
3095      insert_mem_bar(Op_MemBarVolatile);
3096      return true;
3097    default:
3098      fatal_unexpected_iid(id);
3099      return false;
3100  }
3101}
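
// Roughly, at the Java level (sun.misc.Unsafe):
//
//   unsafe.loadFence()   =>  Op_LoadFence      (LoadLoad  + LoadStore)
//   unsafe.storeFence()  =>  Op_StoreFence     (StoreStore + LoadStore)
//   unsafe.fullFence()   =>  Op_MemBarVolatile (all four orderings)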
3102
3103bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
3104  if (!kls->is_Con()) {
3105    return true;
3106  }
3107  const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
3108  if (klsptr == NULL) {
3109    return true;
3110  }
3111  ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
3112  // don't need a guard for a klass that is already initialized
3113  return !ik->is_initialized();
3114}
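
// For example, Unsafe.allocateInstance(Foo.class) with a constant Foo that
// is already initialized needs no guard at all, while a non-constant Class
// argument always gets the _init_state check in the caller below.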
3115
3116//----------------------------inline_unsafe_allocate---------------------------
3117// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
3118bool LibraryCallKit::inline_unsafe_allocate() {
3119  if (callee()->is_static())  return false;  // caller must have the capability!
3120
3121  null_check_receiver();  // null-check, then ignore
3122  Node* cls = null_check(argument(1));
3123  if (stopped())  return true;
3124
3125  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3126  kls = null_check(kls);
3127  if (stopped())  return true;  // argument was like int.class
3128
3129  Node* test = NULL;
3130  if (LibraryCallKit::klass_needs_init_guard(kls)) {
3131    // Note:  The argument might still be an illegal value like
3132    // Serializable.class or Object[].class.   The runtime will handle it.
3133    // But we must make an explicit check for initialization.
3134    Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
3135    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
3136    // can generate code to load it as an unsigned byte.
3137    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
3138    Node* bits = intcon(InstanceKlass::fully_initialized);
3139    test = _gvn.transform(new (C) SubINode(inst, bits));
3140    // The 'test' is non-zero if we need to take a slow path.
3141  }
3142
3143  Node* obj = new_instance(kls, test);
3144  set_result(obj);
3145  return true;
3146}
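
// Note that, as with the native implementation, no constructor runs:
//
//   Foo f = (Foo) unsafe.allocateInstance(Foo.class);  // fields left at defaults
//
// which is why only the klass's initialization state needs checking here.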
3147
3148#ifdef TRACE_HAVE_INTRINSICS
3149/*
3150 * oop -> myklass
3151 * myklass->trace_id |= USED
3152 * return myklass->trace_id & ~0x3
3153 */
3154bool LibraryCallKit::inline_native_classID() {
3155  null_check_receiver();  // null-check, then ignore
3156  Node* cls = null_check(argument(1), T_OBJECT);
3157  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
3158  kls = null_check(kls, T_OBJECT);
3159  ByteSize offset = TRACE_ID_OFFSET;
3160  Node* insp = basic_plus_adr(kls, in_bytes(offset));
3161  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
3162  Node* bits = longcon(~0x03l); // ignore bits 0 and 1
3163  Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
3164  Node* clsused = longcon(0x01l); // set the class bit
3165  Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
3166
3167  const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
3168  store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
3169  set_result(andl);
3170  return true;
3171}
3172
3173bool LibraryCallKit::inline_native_threadID() {
3174  Node* tls_ptr = NULL;
3175  Node* cur_thr = generate_current_thread(tls_ptr);
3176  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3177  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3178  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
3179
3180  Node* threadid = NULL;
3181  size_t thread_id_size = OSThread::thread_id_size();
3182  if (thread_id_size == (size_t) BytesPerLong) {
3183    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
3184  } else if (thread_id_size == (size_t) BytesPerInt) {
3185    threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
3186  } else {
3187    ShouldNotReachHere();
3188  }
3189  set_result(threadid);
3190  return true;
3191}
3192#endif
3193
3194//------------------------inline_native_time_funcs--------------
3195// inline code for System.currentTimeMillis() and System.nanoTime()
3196// these have the same type and signature
3197bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
3198  const TypeFunc* tf = OptoRuntime::void_long_Type();
3199  const TypePtr* no_memory_effects = NULL;
3200  Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
3201  Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0));
3202#ifdef ASSERT
3203  Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+1));
3204  assert(value_top == top(), "second value must be top");
3205#endif
3206  set_result(value);
3207  return true;
3208}
3209
3210//------------------------inline_native_currentThread------------------
3211bool LibraryCallKit::inline_native_currentThread() {
3212  Node* junk = NULL;
3213  set_result(generate_current_thread(junk));
3214  return true;
3215}
3216
3217//------------------------inline_native_isInterrupted------------------
3218// private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
3219bool LibraryCallKit::inline_native_isInterrupted() {
3220  // Add a fast path to t.isInterrupted(clear_int):
3221  //   (t == Thread.current() &&
3222  //    (!TLS._osthread._interrupted || WINDOWS_ONLY(false) NOT_WINDOWS(!clear_int)))
3223  //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
3224  // So, in the common case that the interrupt bit is false,
3225  // we avoid making a call into the VM.  Even if the interrupt bit
3226  // is true, if the clear_int argument is false, we avoid the VM call.
3227  // However, if the receiver is not currentThread, we must call the VM,
3228  // because there must be some locking done around the operation.
3229
3230  // We only go to the fast case code if we pass two guards.
3231  // Paths which do not pass are accumulated in the slow_region.
3232
3233  enum {
3234    no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
3235    no_clear_result_path = 2, // t == Thread.current() &&  TLS._osthread._interrupted && !clear_int
3236    slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
3237    PATH_LIMIT
3238  };
3239
3240  // Ensure that it's not possible to move the load of the
3241  // TLS._osthread._interrupted flag out of the function.
3242  insert_mem_bar(Op_MemBarCPUOrder);
3243
3244  RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
3245  PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
3246
3247  RegionNode* slow_region = new (C) RegionNode(1);
3248  record_for_igvn(slow_region);
3249
3250  // (a) Receiving thread must be the current thread.
3251  Node* rec_thr = argument(0);
3252  Node* tls_ptr = NULL;
3253  Node* cur_thr = generate_current_thread(tls_ptr);
3254  Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr));
3255  Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne));
3256
3257  generate_slow_guard(bol_thr, slow_region);
3258
3259  // (b) Interrupt bit on TLS must be false.
3260  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
3261  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3262  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
3263
3264  // Set the control input on the field _interrupted read to prevent it floating up.
3265  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
3266  Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
3267  Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
3268
3269  IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
3270
3271  // First fast path:  if (!TLS._interrupted) return false;
3272  Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit));
3273  result_rgn->init_req(no_int_result_path, false_bit);
3274  result_val->init_req(no_int_result_path, intcon(0));
3275
3276  // drop through to next case
3277  set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)));
3278
3279#ifndef TARGET_OS_FAMILY_windows
3280  // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
3281  Node* clr_arg = argument(1);
3282  Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0)));
3283  Node* bol_arg = _gvn.transform(new (C) BoolNode(cmp_arg, BoolTest::ne));
3284  IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
3285
3286  // Second fast path:  ... else if (!clear_int) return true;
3287  Node* false_arg = _gvn.transform(new (C) IfFalseNode(iff_arg));
3288  result_rgn->init_req(no_clear_result_path, false_arg);
3289  result_val->init_req(no_clear_result_path, intcon(1));
3290
3291  // drop through to next case
3292  set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)));
3293#else
3294  // To return true on Windows you must read the _interrupted field
3295  // and check the event state, i.e., take the slow path.
3296#endif // TARGET_OS_FAMILY_windows
3297
3298  // (d) Otherwise, go to the slow path.
3299  slow_region->add_req(control());
3300  set_control( _gvn.transform(slow_region));
3301
3302  if (stopped()) {
3303    // There is no slow path.
3304    result_rgn->init_req(slow_result_path, top());
3305    result_val->init_req(slow_result_path, top());
3306  } else {
3307    // non-virtual because it is a private non-static
3308    CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
3309
3310    Node* slow_val = set_results_for_java_call(slow_call);
3311    // this->control() comes from set_results_for_java_call
3312
3313    Node* fast_io  = slow_call->in(TypeFunc::I_O);
3314    Node* fast_mem = slow_call->in(TypeFunc::Memory);
3315
3316    // These two phis are pre-filled with copies of the fast IO and Memory
3317    PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
3318    PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
3319
3320    result_rgn->init_req(slow_result_path, control());
3321    result_io ->init_req(slow_result_path, i_o());
3322    result_mem->init_req(slow_result_path, reset_memory());
3323    result_val->init_req(slow_result_path, slow_val);
3324
3325    set_all_memory(_gvn.transform(result_mem));
3326    set_i_o(       _gvn.transform(result_io));
3327  }
3328
3329  C->set_has_split_ifs(true); // Has chance for split-if optimization
3330  set_result(result_rgn, result_val);
3331  return true;
3332}
3333
3334//---------------------------load_mirror_from_klass----------------------------
3335// Given a klass oop, load its java mirror (a java.lang.Class oop).
3336Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
3337  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
3338  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
3339}
3340
3341//-----------------------load_klass_from_mirror_common-------------------------
3342// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
3343// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
3344// and branch to the given path on the region.
3345// If never_see_null, take an uncommon trap on null, so we can optimistically
3346// compile for the non-null case.
3347// If the region is NULL, force never_see_null = true.
3348Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
3349                                                    bool never_see_null,
3350                                                    RegionNode* region,
3351                                                    int null_path,
3352                                                    int offset) {
3353  if (region == NULL)  never_see_null = true;
3354  Node* p = basic_plus_adr(mirror, offset);
3355  const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3356  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
3357  Node* null_ctl = top();
3358  kls = null_check_oop(kls, &null_ctl, never_see_null);
3359  if (region != NULL) {
3360    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
3361    region->init_req(null_path, null_ctl);
3362  } else {
3363    assert(null_ctl == top(), "no loose ends");
3364  }
3365  return kls;
3366}
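
// E.g. String.class yields its instanceKlass here, while a primitive mirror
// such as int.class has a null klass field and is steered to
// region->in(null_path) (or uncommon-trapped, if never_see_null).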
3367
3368//--------------------(inline_native_Class_query helpers)---------------------
3369// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
3370// Fall through if (mods & mask) == bits, take the guard otherwise.
3371Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
3372  // Branch around if the given klass has the given modifier bit set.
3373  // Like generate_guard, adds a new path onto the region.
3374  Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3375  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
3376  Node* mask = intcon(modifier_mask);
3377  Node* bits = intcon(modifier_bits);
3378  Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
3379  Node* cmp  = _gvn.transform(new (C) CmpINode(mbit, bits));
3380  Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
3381  return generate_fair_guard(bol, region);
3382}
3383Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
3384  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
3385}
3386
3387//-------------------------inline_native_Class_query-------------------
3388bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
3389  const Type* return_type = TypeInt::BOOL;
3390  Node* prim_return_value = top();  // what happens if it's a primitive class?
3391  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3392  bool expect_prim = false;     // most of these guys expect to work on refs
3393
3394  enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
3395
3396  Node* mirror = argument(0);
3397  Node* obj    = top();
3398
3399  switch (id) {
3400  case vmIntrinsics::_isInstance:
3401    // nothing is an instance of a primitive type
3402    prim_return_value = intcon(0);
3403    obj = argument(1);
3404    break;
3405  case vmIntrinsics::_getModifiers:
3406    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3407    assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
3408    return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
3409    break;
3410  case vmIntrinsics::_isInterface:
3411    prim_return_value = intcon(0);
3412    break;
3413  case vmIntrinsics::_isArray:
3414    prim_return_value = intcon(0);
3415    expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
3416    break;
3417  case vmIntrinsics::_isPrimitive:
3418    prim_return_value = intcon(1);
3419    expect_prim = true;  // obviously
3420    break;
3421  case vmIntrinsics::_getSuperclass:
3422    prim_return_value = null();
3423    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3424    break;
3425  case vmIntrinsics::_getComponentType:
3426    prim_return_value = null();
3427    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
3428    break;
3429  case vmIntrinsics::_getClassAccessFlags:
3430    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
3431    return_type = TypeInt::INT;  // not bool!  6297094
3432    break;
3433  default:
3434    fatal_unexpected_iid(id);
3435    break;
3436  }
3437
3438  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
3439  if (mirror_con == NULL)  return false;  // cannot happen?
3440
3441#ifndef PRODUCT
3442  if (C->print_intrinsics() || C->print_inlining()) {
3443    ciType* k = mirror_con->java_mirror_type();
3444    if (k) {
3445      tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
3446      k->print_name();
3447      tty->cr();
3448    }
3449  }
3450#endif
3451
3452  // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
3453  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3454  record_for_igvn(region);
3455  PhiNode* phi = new (C) PhiNode(region, return_type);
3456
3457  // The mirror will never be null for Reflection.getClassAccessFlags; however,
3458  // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
3459  // if it is. See bug 4774291.
3460
3461  // For Reflection.getClassAccessFlags(), the null check occurs in
3462  // the wrong place; see inline_unsafe_access(), above, for a similar
3463  // situation.
3464  mirror = null_check(mirror);
3465  // If mirror or obj is dead, only null-path is taken.
3466  if (stopped())  return true;
3467
3468  if (expect_prim)  never_see_null = false;  // expect nulls (meaning prims)
3469
3470  // Now load the mirror's klass metaobject, and null-check it.
3471  // Side-effects region with the control path if the klass is null.
3472  Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
3473  // If kls is null, we have a primitive mirror.
3474  phi->init_req(_prim_path, prim_return_value);
3475  if (stopped()) { set_result(region, phi); return true; }
3476  bool safe_for_replace = (region->in(_prim_path) == top());
3477
3478  Node* p;  // handy temp
3479  Node* null_ctl;
3480
3481  // Now that we have the non-null klass, we can perform the real query.
3482  // For constant classes, the query will constant-fold in LoadNode::Value.
3483  Node* query_value = top();
3484  switch (id) {
3485  case vmIntrinsics::_isInstance:
3486    // nothing is an instance of a primitive type
3487    query_value = gen_instanceof(obj, kls, safe_for_replace);
3488    break;
3489
3490  case vmIntrinsics::_getModifiers:
3491    p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
3492    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3493    break;
3494
3495  case vmIntrinsics::_isInterface:
3496    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3497    if (generate_interface_guard(kls, region) != NULL)
3498      // A guard was added.  If the guard is taken, it was an interface.
3499      phi->add_req(intcon(1));
3500    // If we fall through, it's a plain class.
3501    query_value = intcon(0);
3502    break;
3503
3504  case vmIntrinsics::_isArray:
3505    // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
3506    if (generate_array_guard(kls, region) != NULL)
3507      // A guard was added.  If the guard is taken, it was an array.
3508      phi->add_req(intcon(1));
3509    // If we fall through, it's a plain class.
3510    query_value = intcon(0);
3511    break;
3512
3513  case vmIntrinsics::_isPrimitive:
3514    query_value = intcon(0); // "normal" path produces false
3515    break;
3516
3517  case vmIntrinsics::_getSuperclass:
3518    // The rules here are somewhat unfortunate, but we can still do better
3519    // with random logic than with a JNI call.
3520    // Interfaces store null or Object as _super, but must report null.
3521    // Arrays store an intermediate super as _super, but must report Object.
3522    // Other types can report the actual _super.
3523    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
3524    if (generate_interface_guard(kls, region) != NULL)
3525      // A guard was added.  If the guard is taken, it was an interface.
3526      phi->add_req(null());
3527    if (generate_array_guard(kls, region) != NULL)
3528      // A guard was added.  If the guard is taken, it was an array.
3529      phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
3530    // If we fall through, it's a plain class.  Get its _super.
3531    p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
3532    kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
3533    null_ctl = top();
3534    kls = null_check_oop(kls, &null_ctl);
3535    if (null_ctl != top()) {
3536      // If the guard is taken, Object.superClass is null (both klass and mirror).
3537      region->add_req(null_ctl);
3538      phi   ->add_req(null());
3539    }
3540    if (!stopped()) {
3541      query_value = load_mirror_from_klass(kls);
3542    }
3543    break;
3544
3545  case vmIntrinsics::_getComponentType:
3546    if (generate_array_guard(kls, region) != NULL) {
3547      // Be sure to pin the oop load to the guard edge just created:
3548      Node* is_array_ctrl = region->in(region->req()-1);
3549      Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
3550      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
3551      phi->add_req(cmo);
3552    }
3553    query_value = null();  // non-array case is null
3554    break;
3555
3556  case vmIntrinsics::_getClassAccessFlags:
3557    p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
3558    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
3559    break;
3560
3561  default:
3562    fatal_unexpected_iid(id);
3563    break;
3564  }
3565
3566  // Fall-through is the normal case of a query to a real class.
3567  phi->init_req(1, query_value);
3568  region->init_req(1, control());
3569
3570  C->set_has_split_ifs(true); // Has chance for split-if optimization
3571  set_result(region, phi);
3572  return true;
3573}
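
// A few illustrative queries and the paths they take above:
//
//   Runnable.class.isInterface()    -> interface guard taken -> true
//   int.class.isPrimitive()         -> _prim_path            -> 1
//   String[].class.getSuperclass()  -> array guard taken     -> Object.class
//   String.class.isInstance("x")    -> _normal_path via gen_instanceof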
3574
3575//--------------------------inline_native_subtype_check------------------------
3576// This intrinsic takes the JNI calls out of the heart of
3577// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
3578bool LibraryCallKit::inline_native_subtype_check() {
3579  // Pull both arguments off the stack.
3580  Node* args[2];                // two java.lang.Class mirrors: superc, subc
3581  args[0] = argument(0);
3582  args[1] = argument(1);
3583  Node* klasses[2];             // corresponding Klasses: superk, subk
3584  klasses[0] = klasses[1] = top();
3585
3586  enum {
3587    // A full decision tree on {superc is prim, subc is prim}:
3588    _prim_0_path = 1,           // {P,N} => false
3589                                // {P,P} & superc!=subc => false
3590    _prim_same_path,            // {P,P} & superc==subc => true
3591    _prim_1_path,               // {N,P} => false
3592    _ref_subtype_path,          // {N,N} & subtype check wins => true
3593    _both_ref_path,             // {N,N} & subtype check loses => false
3594    PATH_LIMIT
3595  };
3596
3597  RegionNode* region = new (C) RegionNode(PATH_LIMIT);
3598  Node*       phi    = new (C) PhiNode(region, TypeInt::BOOL);
3599  record_for_igvn(region);
3600
3601  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
3602  const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
3603  int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
3604
3605  // First null-check both mirrors and load each mirror's klass metaobject.
3606  int which_arg;
3607  for (which_arg = 0; which_arg <= 1; which_arg++) {
3608    Node* arg = args[which_arg];
3609    arg = null_check(arg);
3610    if (stopped())  break;
3611    args[which_arg] = arg;
3612
3613    Node* p = basic_plus_adr(arg, class_klass_offset);
3614    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
3615    klasses[which_arg] = _gvn.transform(kls);
3616  }
3617
3618  // Having loaded both klasses, test each for null.
3619  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3620  for (which_arg = 0; which_arg <= 1; which_arg++) {
3621    Node* kls = klasses[which_arg];
3622    Node* null_ctl = top();
3623    kls = null_check_oop(kls, &null_ctl, never_see_null);
3624    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
3625    region->init_req(prim_path, null_ctl);
3626    if (stopped())  break;
3627    klasses[which_arg] = kls;
3628  }
3629
3630  if (!stopped()) {
3631    // now we have two reference types, in klasses[0..1]
3632    Node* subk   = klasses[1];  // the argument to isAssignableFrom
3633    Node* superk = klasses[0];  // the receiver
3634    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
3635    // now we have a successful reference subtype check
3636    region->set_req(_ref_subtype_path, control());
3637  }
3638
3639  // If both operands are primitive (both klasses null), then
3640  // we must return true when they are identical primitives.
3641  // It is convenient to test this after the first null klass check.
3642  set_control(region->in(_prim_0_path)); // go back to first null check
3643  if (!stopped()) {
3644    // Since superc is primitive, make a guard for the superc==subc case.
3645    Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1]));
3646    Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq));
3647    generate_guard(bol_eq, region, PROB_FAIR);
3648    if (region->req() == PATH_LIMIT+1) {
3649      // A guard was added.  If the added guard is taken, superc==subc.
3650      region->swap_edges(PATH_LIMIT, _prim_same_path);
3651      region->del_req(PATH_LIMIT);
3652    }
3653    region->set_req(_prim_0_path, control()); // Not equal after all.
3654  }
3655
3656  // these are the only paths that produce 'true':
3657  phi->set_req(_prim_same_path,   intcon(1));
3658  phi->set_req(_ref_subtype_path, intcon(1));
3659
3660  // pull together the cases:
3661  assert(region->req() == PATH_LIMIT, "sane region");
3662  for (uint i = 1; i < region->req(); i++) {
3663    Node* ctl = region->in(i);
3664    if (ctl == NULL || ctl == top()) {
3665      region->set_req(i, top());
3666      phi   ->set_req(i, top());
3667    } else if (phi->in(i) == NULL) {
3668      phi->set_req(i, intcon(0)); // all other paths produce 'false'
3669    }
3670  }
3671
3672  set_control(_gvn.transform(region));
3673  set_result(_gvn.transform(phi));
3674  return true;
3675}
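
// Illustrative examples of the decision tree above:
//
//   Number.class.isAssignableFrom(Integer.class) -> _ref_subtype_path -> true
//   int.class.isAssignableFrom(int.class)        -> _prim_same_path   -> true
//   int.class.isAssignableFrom(long.class)       -> _prim_0_path      -> false
//   Integer.class.isAssignableFrom(int.class)    -> _prim_1_path      -> false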
3676
3677//---------------------generate_array_guard_common------------------------
3678Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3679                                                  bool obj_array, bool not_array) {
3680  // If obj_array/not_array==false/false:
3681  // Branch around if the given klass is in fact an array (either obj or prim).
3682  // If obj_array/not_array==false/true:
3683  // Branch around if the given klass is not an array klass of any kind.
3684  // If obj_array/not_array==true/true:
3685  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3686  // If obj_array/not_array==true/false:
3687  // Branch around if the kls is an oop array (Object[] or subtype)
3688  //
3689  // Like generate_guard, adds a new path onto the region.
3690  jint  layout_con = 0;
3691  Node* layout_val = get_layout_helper(kls, layout_con);
3692  if (layout_val == NULL) {
3693    bool query = (obj_array
3694                  ? Klass::layout_helper_is_objArray(layout_con)
3695                  : Klass::layout_helper_is_array(layout_con));
3696    if (query == not_array) {
3697      return NULL;                       // never a branch
3698    } else {                             // always a branch
3699      Node* always_branch = control();
3700      if (region != NULL)
3701        region->add_req(always_branch);
3702      set_control(top());
3703      return always_branch;
3704    }
3705  }
3706  // Now test the correct condition.
3707  jint  nval = (obj_array
3708                ? ((jint)Klass::_lh_array_tag_type_value
3709                   <<    Klass::_lh_array_tag_shift)
3710                : Klass::_lh_neutral_value);
3711  Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval)));
3712  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3713  // invert the test if we are looking for a non-array
3714  if (not_array)  btest = BoolTest(btest).negate();
3715  Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest));
3716  return generate_fair_guard(bol, region);
3717}
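
// The single signed compare works because of the layout-helper encoding
// (see Klass::layout_helper): instance klasses have lh >= 0, type arrays
// carry tag 0xC0...... and object arrays tag 0x80......, so as signed ints
//   objArray lh  <  typeArray lh  <  0  <=  instance lh
// and BoolTest::lt against the right constant isolates each case.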
3718
3719
3720//-----------------------inline_native_newArray--------------------------
3721// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
3722bool LibraryCallKit::inline_native_newArray() {
3723  Node* mirror    = argument(0);
3724  Node* count_val = argument(1);
3725
3726  mirror = null_check(mirror);
3727  // If mirror or obj is dead, only null-path is taken.
3728  if (stopped())  return true;
3729
3730  enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3731  RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
3732  PhiNode*    result_val = new(C) PhiNode(result_reg,
3733                                          TypeInstPtr::NOTNULL);
3734  PhiNode*    result_io  = new(C) PhiNode(result_reg, Type::ABIO);
3735  PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
3736                                          TypePtr::BOTTOM);
3737
3738  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3739  Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3740                                                  result_reg, _slow_path);
3741  Node* normal_ctl   = control();
3742  Node* no_array_ctl = result_reg->in(_slow_path);
3743
3744  // Generate code for the slow case.  We make a call to newArray().
3745  set_control(no_array_ctl);
3746  if (!stopped()) {
3747    // Either the input type is void.class, or else the
3748    // array klass has not yet been cached.  Either the
3749    // ensuing call will throw an exception, or else it
3750    // will cache the array klass for next time.
3751    PreserveJVMState pjvms(this);
3752    CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3753    Node* slow_result = set_results_for_java_call(slow_call);
3754    // this->control() comes from set_results_for_java_call
3755    result_reg->set_req(_slow_path, control());
3756    result_val->set_req(_slow_path, slow_result);
3757    result_io ->set_req(_slow_path, i_o());
3758    result_mem->set_req(_slow_path, reset_memory());
3759  }
3760
3761  set_control(normal_ctl);
3762  if (!stopped()) {
3763    // Normal case:  The array type has been cached in the java.lang.Class.
3764    // The following call works fine even if the array type is polymorphic.
3765    // It could be a dynamic mix of int[], boolean[], Object[], etc.
3766    Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
3767    result_reg->init_req(_normal_path, control());
3768    result_val->init_req(_normal_path, obj);
3769    result_io ->init_req(_normal_path, i_o());
3770    result_mem->init_req(_normal_path, reset_memory());
3771  }
3772
3773  // Return the combined state.
3774  set_i_o(        _gvn.transform(result_io)  );
3775  set_all_memory( _gvn.transform(result_mem));
3776
3777  C->set_has_split_ifs(true); // Has chance for split-if optimization
3778  set_result(result_reg, result_val);
3779  return true;
3780}
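// Editor's note (illustrative Java, not part of this file): the intrinsic
// above backs reflective allocations such as
//
//   int[] a = (int[]) java.lang.reflect.Array.newInstance(int.class, 10);
//
// On the fast path the array klass cached in the java.lang.Class mirror
// feeds straight into new_array(); the out-of-line newArray() call remains
// only for void.class and for mirrors whose array klass is not yet cached.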
3781
3782//----------------------inline_native_getLength--------------------------
3783// public static native int java.lang.reflect.Array.getLength(Object array);
3784bool LibraryCallKit::inline_native_getLength() {
3785  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3786
3787  Node* array = null_check(argument(0));
3788  // If array is dead, only null-path is taken.
3789  if (stopped())  return true;
3790
3791  // Deoptimize if it is a non-array.
3792  Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3793
3794  if (non_array != NULL) {
3795    PreserveJVMState pjvms(this);
3796    set_control(non_array);
3797    uncommon_trap(Deoptimization::Reason_intrinsic,
3798                  Deoptimization::Action_maybe_recompile);
3799  }
3800
3801  // If control is dead, only non-array-path is taken.
3802  if (stopped())  return true;
3803
3804  // This works fine even if the array type is polymorphic.
3805  // It could be a dynamic mix of int[], boolean[], Object[], etc.
3806  Node* result = load_array_length(array);
3807
3808  C->set_has_split_ifs(true);  // Has chance for split-if optimization
3809  set_result(result);
3810  return true;
3811}
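// Editor's note (illustrative Java, not part of this file): this turns
//
//   java.lang.reflect.Array.getLength(new int[3])   // == 3
//
// into a single length load.  A non-array argument fails the guard and
// deoptimizes; the re-executed native method then throws the
// IllegalArgumentException the API requires.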
3812
3813//------------------------inline_array_copyOf----------------------------
3814// public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
3815// public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
3816bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3817  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3818
3819  // Get the arguments.
3820  Node* original          = argument(0);
3821  Node* start             = is_copyOfRange? argument(1): intcon(0);
3822  Node* end               = is_copyOfRange? argument(2): argument(1);
3823  Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3824
3825  Node* newcopy;
3826
3827  // Set the original stack and the reexecute bit for the interpreter to reexecute
3828  // the bytecode that invokes Arrays.copyOf if deoptimization happens.
3829  { PreserveReexecuteState preexecs(this);
3830    jvms()->set_should_reexecute(true);
3831
3832    array_type_mirror = null_check(array_type_mirror);
3833    original          = null_check(original);
3834
3835    // Check if a null path was taken unconditionally.
3836    if (stopped())  return true;
3837
3838    Node* orig_length = load_array_length(original);
3839
3840    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
3841    klass_node = null_check(klass_node);
3842
3843    RegionNode* bailout = new (C) RegionNode(1);
3844    record_for_igvn(bailout);
3845
3846    // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3847    // Bail out if that is so.
3848    Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3849    if (not_objArray != NULL) {
3850      // Improve the klass node's type from the new optimistic assumption:
3851      ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3852      const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3853      Node* cast = new (C) CastPPNode(klass_node, akls);
3854      cast->init_req(0, control());
3855      klass_node = _gvn.transform(cast);
3856    }
3857
3858    // Bail out if either start or end is negative.
3859    generate_negative_guard(start, bailout, &start);
3860    generate_negative_guard(end,   bailout, &end);
3861
3862    Node* length = end;
3863    if (_gvn.type(start) != TypeInt::ZERO) {
3864      length = _gvn.transform(new (C) SubINode(end, start));
3865    }
3866
3867    // Bail out if length is negative.
3868    // Without this the new_array would throw
3869    // NegativeArraySizeException but IllegalArgumentException is what
3870    // should be thrown
3871    generate_negative_guard(length, bailout, &length);
3872
3873    if (bailout->req() > 1) {
3874      PreserveJVMState pjvms(this);
3875      set_control(_gvn.transform(bailout));
3876      uncommon_trap(Deoptimization::Reason_intrinsic,
3877                    Deoptimization::Action_maybe_recompile);
3878    }
3879
3880    if (!stopped()) {
3881      // How many elements will we copy from the original?
3882      // The answer is MinI(orig_length - start, length).
3883      Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
3884      Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
3885
3886      newcopy = new_array(klass_node, length, 0);  // no arguments to push
3887
3888      // Generate a direct call to the right arraycopy function(s).
3889      // We know the copy is disjoint but we might not know if the
3890      // oop stores need checking.
3891      // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3892      // This will fail a store-check if x contains any non-nulls.
3893      bool disjoint_bases = true;
3894      // if start > orig_length then the length of the copy may be
3895      // negative.
3896      bool length_never_negative = !is_copyOfRange;
3897      generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
3898                         original, start, newcopy, intcon(0), moved,
3899                         disjoint_bases, length_never_negative);
3900    }
3901  } // original reexecute is set back here
3902
3903  C->set_has_split_ifs(true); // Has chance for split-if optimization
3904  if (!stopped()) {
3905    set_result(newcopy);
3906  }
3907  return true;
3908}
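// Editor's worked example of the copy-length math above:
//
//   Arrays.copyOf(new String[5], 8, String[].class)
//     start = 0, end = length = 8, orig_length = 5
//     orig_tail = 5 - 0 = 5;  moved = min(5, 8) = 5
//
// Five elements are copied, and the zeroing done by new_array() supplies
// the trailing three nulls, matching the Java-level contract of copyOf.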
3909
3910
3911//----------------------generate_virtual_guard---------------------------
3912// Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
3913Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3914                                             RegionNode* slow_region) {
3915  ciMethod* method = callee();
3916  int vtable_index = method->vtable_index();
3917  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3918         err_msg_res("bad index %d", vtable_index));
3919  // Get the Method* out of the appropriate vtable entry.
3920  int entry_offset  = (InstanceKlass::vtable_start_offset() +
3921                     vtable_index*vtableEntry::size()) * wordSize +
3922                     vtableEntry::method_offset_in_bytes();
3923  Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
3924  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
3925
3926  // Compare the target method with the expected method (e.g., Object.hashCode).
3927  const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
3928
3929  Node* native_call = makecon(native_call_addr);
3930  Node* chk_native  = _gvn.transform(new(C) CmpPNode(target_call, native_call));
3931  Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne));
3932
3933  return generate_slow_guard(test_native, slow_region);
3934}
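// Editor's sketch of the vtable peek above (symbolic; real offsets are
// platform dependent).  The loaded Method* lives at
//
//   obj_klass + (vtable_start_offset() + vtable_index * vtableEntry::size())
//                 * wordSize
//             + vtableEntry::method_offset_in_bytes()
//
// Comparing it (ne) against the constant Method* of the intrinsified method
// sends receivers whose class overrides the method to slow_region, while
// receivers that still inherit, e.g., Object.hashCode() stay on the fast path.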
3935
3936//-----------------------generate_method_call----------------------------
3937// Use generate_method_call to make a slow-call to the real
3938// method if the fast path fails.  An alternative would be to
3939// use a stub like OptoRuntime::slow_arraycopy_Java.
3940// This only works for expanding the current library call,
3941// not another intrinsic.  (E.g., don't use this for making an
3942// arraycopy call inside of the copyOf intrinsic.)
3943CallJavaNode*
3944LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
3945  // When compiling the intrinsic method itself, do not use this technique.
3946  guarantee(callee() != C->method(), "cannot make slow-call to self");
3947
3948  ciMethod* method = callee();
3949  // ensure the JVMS we have will be correct for this call
3950  guarantee(method_id == method->intrinsic_id(), "must match");
3951
3952  const TypeFunc* tf = TypeFunc::make(method);
3953  CallJavaNode* slow_call;
3954  if (is_static) {
3955    assert(!is_virtual, "");
3956    slow_call = new(C) CallStaticJavaNode(C, tf,
3957                           SharedRuntime::get_resolve_static_call_stub(),
3958                           method, bci());
3959  } else if (is_virtual) {
3960    null_check_receiver();
3961    int vtable_index = Method::invalid_vtable_index;
3962    if (UseInlineCaches) {
3963      // Suppress the vtable call
3964    } else {
3965      // hashCode and clone are not miranda methods,
3966      // so the vtable index is fixed.
3967      // No need to use the linkResolver to get it.
3968       vtable_index = method->vtable_index();
3969       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
3970              err_msg_res("bad index %d", vtable_index));
3971    }
3972    slow_call = new(C) CallDynamicJavaNode(tf,
3973                          SharedRuntime::get_resolve_virtual_call_stub(),
3974                          method, vtable_index, bci());
3975  } else {  // neither virtual nor static:  opt_virtual
3976    null_check_receiver();
3977    slow_call = new(C) CallStaticJavaNode(C, tf,
3978                                SharedRuntime::get_resolve_opt_virtual_call_stub(),
3979                                method, bci());
3980    slow_call->set_optimized_virtual(true);
3981  }
3982  set_arguments_for_java_call(slow_call);
3983  set_edges_for_java_call(slow_call);
3984  return slow_call;
3985}
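// Editor's summary of the three call shapes generate_method_call can emit:
//
//   is_static             -> CallStaticJavaNode via the resolve-static stub
//   is_virtual            -> CallDynamicJavaNode via the resolve-virtual stub
//                            (fixed vtable index unless UseInlineCaches)
//   neither (opt_virtual) -> CallStaticJavaNode marked optimized_virtual,
//                            via the resolve-opt-virtual stub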
3986
3987
3988//------------------------------inline_native_hashcode--------------------
3989// Build special case code for calls to hashCode on an object.
3990bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3991  assert(is_static == callee()->is_static(), "correct intrinsic selection");
3992  assert(!(is_virtual && is_static), "either virtual, special, or static");
3993
3994  enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3995
3996  RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
3997  PhiNode*    result_val = new(C) PhiNode(result_reg,
3998                                          TypeInt::INT);
3999  PhiNode*    result_io  = new(C) PhiNode(result_reg, Type::ABIO);
4000  PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
4001                                          TypePtr::BOTTOM);
4002  Node* obj = NULL;
4003  if (!is_static) {
4004    // Check for hashing null object
4005    obj = null_check_receiver();
4006    if (stopped())  return true;        // unconditionally null
4007    result_reg->init_req(_null_path, top());
4008    result_val->init_req(_null_path, top());
4009  } else {
4010    // Do a null check, and return zero if null.
4011    // System.identityHashCode(null) == 0
4012    obj = argument(0);
4013    Node* null_ctl = top();
4014    obj = null_check_oop(obj, &null_ctl);
4015    result_reg->init_req(_null_path, null_ctl);
4016    result_val->init_req(_null_path, _gvn.intcon(0));
4017  }
4018
4019  // Unconditionally null?  Then return right away.
4020  if (stopped()) {
4021    set_control( result_reg->in(_null_path));
4022    if (!stopped())
4023      set_result(result_val->in(_null_path));
4024    return true;
4025  }
4026
4027  // After null check, get the object's klass.
4028  Node* obj_klass = load_object_klass(obj);
4029
4030  // This call may be virtual (invokevirtual) or bound (invokespecial).
4031  // For each case we generate slightly different code.
4032
4033  // We only go to the fast case code if we pass a number of guards.  The
4034  // paths which do not pass are accumulated in the slow_region.
4035  RegionNode* slow_region = new (C) RegionNode(1);
4036  record_for_igvn(slow_region);
4037
4038  // If this is a virtual call, we generate a funny guard.  We pull out
4039  // the vtable entry corresponding to hashCode() from the target object.
4040  // If the target method which we are calling happens to be the native
4041  // Object hashCode() method, we pass the guard.  We do not need this
4042  // guard for non-virtual calls -- the caller is known to be the native
4043  // Object hashCode().
4044  if (is_virtual) {
4045    generate_virtual_guard(obj_klass, slow_region);
4046  }
4047
4048  // Get the header out of the object, use LoadMarkNode when available
4049  Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
4050  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4051
4052  // Test the header to see if it is unlocked.
4053  Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
4054  Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
4055  Node *unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
4056  Node *chk_unlocked   = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val));
4057  Node *test_unlocked  = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne));
4058
4059  generate_slow_guard(test_unlocked, slow_region);
4060
4061  // Get the hash value and check to see that it has been properly assigned.
4062  // We depend on hash_mask being at most 32 bits and avoid the use of
4063  // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
4064  // vm: see markOop.hpp.
4065  Node *hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
4066  Node *hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
4067  Node *hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift));
4068  // This hack lets the hash bits live anywhere in the mark object now, as long
4069  // as the shift drops the relevant bits into the low 32 bits.  Note that
4070  // Java spec says that HashCode is an int so there's no point in capturing
4071  // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
4072  hshifted_header      = ConvX2I(hshifted_header);
4073  Node *hash_val       = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask));
4074
4075  Node *no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
4076  Node *chk_assigned   = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val));
4077  Node *test_assigned  = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq));
4078
4079  generate_slow_guard(test_assigned, slow_region);
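  // Editor's sketch (Java-like pseudocode, not VM code) of the fast path
  // the two guards above protect:
  //
  //   word mark = obj->mark();
  //   if ((mark & biased_lock_mask_in_place) != unlocked_value)  goto slow;
  //   int h = ((int)(mark >>> hash_shift)) & hash_mask;
  //   if (h == markOopDesc::no_hash)  goto slow;   // hash not yet assigned
  //   return h;                                    // no call at all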
4080
4081  Node* init_mem = reset_memory();
4082  // fill in the rest of the null path:
4083  result_io ->init_req(_null_path, i_o());
4084  result_mem->init_req(_null_path, init_mem);
4085
4086  result_val->init_req(_fast_path, hash_val);
4087  result_reg->init_req(_fast_path, control());
4088  result_io ->init_req(_fast_path, i_o());
4089  result_mem->init_req(_fast_path, init_mem);
4090
4091  // Generate code for the slow case.  We make a call to hashCode().
4092  set_control(_gvn.transform(slow_region));
4093  if (!stopped()) {
4094    // No need for PreserveJVMState, because we're using up the present state.
4095    set_all_memory(init_mem);
4096    vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
4097    CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
4098    Node* slow_result = set_results_for_java_call(slow_call);
4099    // this->control() comes from set_results_for_java_call
4100    result_reg->init_req(_slow_path, control());
4101    result_val->init_req(_slow_path, slow_result);
4102    result_io  ->set_req(_slow_path, i_o());
4103    result_mem ->set_req(_slow_path, reset_memory());
4104  }
4105
4106  // Return the combined state.
4107  set_i_o(        _gvn.transform(result_io)  );
4108  set_all_memory( _gvn.transform(result_mem));
4109
4110  set_result(result_reg, result_val);
4111  return true;
4112}
4113
4114//---------------------------inline_native_getClass----------------------------
4115// public final native Class<?> java.lang.Object.getClass();
4116//
4117// Build special case code for calls to getClass on an object.
4118bool LibraryCallKit::inline_native_getClass() {
4119  Node* obj = null_check_receiver();
4120  if (stopped())  return true;
4121  set_result(load_mirror_from_klass(load_object_klass(obj)));
4122  return true;
4123}
4124
4125//-----------------inline_native_Reflection_getCallerClass---------------------
4126// public static native Class<?> sun.reflect.Reflection.getCallerClass();
4127//
4128// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
4129//
4130// NOTE: This code must perform the same logic as JVM_GetCallerClass
4131// in that it must skip particular security frames and checks for
4132// caller sensitive methods.
4133bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
4134#ifndef PRODUCT
4135  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4136    tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
4137  }
4138#endif
4139
4140  if (!jvms()->has_method()) {
4141#ifndef PRODUCT
4142    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4143      tty->print_cr("  Bailing out because intrinsic was inlined at top level");
4144    }
4145#endif
4146    return false;
4147  }
4148
4149  // Walk back up the JVM state to find the caller at the required
4150  // depth.
4151  JVMState* caller_jvms = jvms();
4152
4153  // Cf. JVM_GetCallerClass
4154  // NOTE: Start the loop at depth 1 because the current JVM state does
4155  // not include the Reflection.getCallerClass() frame.
4156  for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) {
4157    ciMethod* m = caller_jvms->method();
4158    switch (n) {
4159    case 0:
4160      fatal("current JVM state does not include the Reflection.getCallerClass frame");
4161      break;
4162    case 1:
4163      // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
4164      if (!m->caller_sensitive()) {
4165#ifndef PRODUCT
4166        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4167          tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
4168        }
4169#endif
4170        return false;  // bail-out; let JVM_GetCallerClass do the work
4171      }
4172      break;
4173    default:
4174      if (!m->is_ignored_by_security_stack_walk()) {
4175        // We have reached the desired frame; return the holder class.
4176        // Acquire method holder as java.lang.Class and push as constant.
4177        ciInstanceKlass* caller_klass = caller_jvms->method()->holder();
4178        ciInstance* caller_mirror = caller_klass->java_mirror();
4179        set_result(makecon(TypeInstPtr::make(caller_mirror)));
4180
4181#ifndef PRODUCT
4182        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4183          tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
4184          tty->print_cr("  JVM state at this point:");
4185          for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4186            ciMethod* m = jvms()->of_depth(i)->method();
4187            tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4188          }
4189        }
4190#endif
4191        return true;
4192      }
4193      break;
4194    }
4195  }
4196
4197#ifndef PRODUCT
4198  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
4199    tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
4200    tty->print_cr("  JVM state at this point:");
4201    for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
4202      ciMethod* m = jvms()->of_depth(i)->method();
4203      tty->print_cr("   %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8());
4204    }
4205  }
4206#endif
4207
4208  return false;  // bail-out; let JVM_GetCallerClass do the work
4209}
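// Editor's worked example of the frame walk above (class names hypothetical):
//
//   class Util { @CallerSensitive static Class<?> who() {
//                  return Reflection.getCallerClass(); } }
//   class App  { void f() { Util.who(); } }
//
// Depth 1 is Util.who(), which must carry @CallerSensitive; depth 2 is
// App.f(), which is not ignored by the security stack walk and so becomes
// the constant result App.class.  Frames belonging to reflection or
// MethodHandle plumbing at depth >= 2 are skipped instead.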
4210
4211bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4212  Node* arg = argument(0);
4213  Node* result;
4214
4215  switch (id) {
4216  case vmIntrinsics::_floatToRawIntBits:    result = new (C) MoveF2INode(arg);  break;
4217  case vmIntrinsics::_intBitsToFloat:       result = new (C) MoveI2FNode(arg);  break;
4218  case vmIntrinsics::_doubleToRawLongBits:  result = new (C) MoveD2LNode(arg);  break;
4219  case vmIntrinsics::_longBitsToDouble:     result = new (C) MoveL2DNode(arg);  break;
4220
4221  case vmIntrinsics::_doubleToLongBits: {
4222    // Two paths (plus control) merge in a region and phi.
4223    RegionNode *r = new (C) RegionNode(3);
4224    Node *phi = new (C) PhiNode(r, TypeLong::LONG);
4225
4226    Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
4227    // Build the boolean node
4228    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
4229
4230    // Branch either way.
4231    // NaN case is less traveled, which makes all the difference.
4232    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4233    Node *opt_isnan = _gvn.transform(ifisnan);
4234    assert( opt_isnan->is_If(), "Expect an IfNode");
4235    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4236    Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));
4237
4238    set_control(iftrue);
4239
4240    static const jlong nan_bits = CONST64(0x7ff8000000000000);
4241    Node *slow_result = longcon(nan_bits); // return NaN
4242    phi->init_req(1, _gvn.transform( slow_result ));
4243    r->init_req(1, iftrue);
4244
4245    // Else fall through
4246    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
4247    set_control(iffalse);
4248
4249    phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg)));
4250    r->init_req(2, iffalse);
4251
4252    // Post merge
4253    set_control(_gvn.transform(r));
4254    record_for_igvn(r);
4255
4256    C->set_has_split_ifs(true); // Has chance for split-if optimization
4257    result = phi;
4258    assert(result->bottom_type()->isa_long(), "must be");
4259    break;
4260  }
4261
4262  case vmIntrinsics::_floatToIntBits: {
4263    // Two paths (plus control) merge in a region and phi.
4264    RegionNode *r = new (C) RegionNode(3);
4265    Node *phi = new (C) PhiNode(r, TypeInt::INT);
4266
4267    Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg));
4268    // Build the boolean node
4269    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
4270
4271    // Branch either way.
4272    // NaN case is less traveled, which makes all the difference.
4273    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
4274    Node *opt_isnan = _gvn.transform(ifisnan);
4275    assert( opt_isnan->is_If(), "Expect an IfNode");
4276    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
4277    Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan));
4278
4279    set_control(iftrue);
4280
4281    static const jint nan_bits = 0x7fc00000;
4282    Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
4283    phi->init_req(1, _gvn.transform( slow_result ));
4284    r->init_req(1, iftrue);
4285
4286    // Else fall through
4287    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
4288    set_control(iffalse);
4289
4290    phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg)));
4291    r->init_req(2, iffalse);
4292
4293    // Post merge
4294    set_control(_gvn.transform(r));
4295    record_for_igvn(r);
4296
4297    C->set_has_split_ifs(true); // Has chance for split-if optimization
4298    result = phi;
4299    assert(result->bottom_type()->isa_int(), "must be");
4300    break;
4301  }
4302
4303  default:
4304    fatal_unexpected_iid(id);
4305    break;
4306  }
4307  set_result(_gvn.transform(result));
4308  return true;
4309}
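// Editor's note on the asymmetry above: the *Raw* conversions are plain bit
// moves, while floatToIntBits/doubleToLongBits must canonicalize NaNs --
// which is exactly what the NaN branches implement:
//
//   Double.doubleToRawLongBits(nan)  // any of the many NaN bit patterns
//   Double.doubleToLongBits(nan)     // always 0x7ff8000000000000L
//   Float.floatToIntBits(nan)        // always 0x7fc00000
//
// The value-compared-with-itself CmpD/CmpF is the usual isNaN test, since
// NaN is the only value not equal to itself.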
4310
4311#ifdef _LP64
4312#define XTOP ,top() /*additional argument*/
4313#else  //_LP64
4314#define XTOP        /*no additional argument*/
4315#endif //_LP64
4316
4317//----------------------inline_unsafe_copyMemory-------------------------
4318// public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
4319bool LibraryCallKit::inline_unsafe_copyMemory() {
4320  if (callee()->is_static())  return false;  // caller must have the capability!
4321  null_check_receiver();  // null-check receiver
4322  if (stopped())  return true;
4323
4324  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
4325
4326  Node* src_ptr =         argument(1);   // type: oop
4327  Node* src_off = ConvL2X(argument(2));  // type: long
4328  Node* dst_ptr =         argument(4);   // type: oop
4329  Node* dst_off = ConvL2X(argument(5));  // type: long
4330  Node* size    = ConvL2X(argument(7));  // type: long
4331
4332  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
4333         "fieldOffset must be byte-scaled");
4334
4335  Node* src = make_unsafe_address(src_ptr, src_off);
4336  Node* dst = make_unsafe_address(dst_ptr, dst_off);
4337
4338  // Conservatively insert a memory barrier on all memory slices.
4339  // Do not let writes of the copy source or destination float below the copy.
4340  insert_mem_bar(Op_MemBarCPUOrder);
4341
4342  // Call it.  Note that the length argument is not scaled.
4343  make_runtime_call(RC_LEAF|RC_NO_FP,
4344                    OptoRuntime::fast_arraycopy_Type(),
4345                    StubRoutines::unsafe_arraycopy(),
4346                    "unsafe_arraycopy",
4347                    TypeRawPtr::BOTTOM,
4348                    src, dst, size XTOP);
4349
4350  // Do not let reads of the copy destination float above the copy.
4351  insert_mem_bar(Op_MemBarCPUOrder);
4352
4353  return true;
4354}
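// Editor's note on the argument indices above: longs occupy two slots in
// the JVM calling convention, so for
//   copyMemory(Object srcBase, long srcOffset, Object destBase,
//              long destOffset, long bytes)
// the receiver is slot 0 and the operands land in slots 1, 2-3, 4, 5-6,
// and 7-8 -- hence argument(1), (2), (4), (5), and (7).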
4355
4356//------------------------copy_to_clone----------------------------------
4357// Helper function for inline_native_clone.
4358void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
4359  assert(obj_size != NULL, "");
4360  Node* raw_obj = alloc_obj->in(1);
4361  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
4362
4363  AllocateNode* alloc = NULL;
4364  if (ReduceBulkZeroing) {
4365    // We will be completely responsible for initializing this object -
4366    // mark Initialize node as complete.
4367    alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
4368    // The object was just allocated - there should not be any stores!
4369    guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
4370    // Mark as complete_with_arraycopy so that on AllocateNode
4371    // expansion, we know this AllocateNode is initialized by an array
4372    // copy and a StoreStore barrier exists after the array copy.
4373    alloc->initialization()->set_complete_with_arraycopy();
4374  }
4375
4376  // Copy the fastest available way.
4377  // TODO: generate fields copies for small objects instead.
4378  Node* src  = obj;
4379  Node* dest = alloc_obj;
4380  Node* size = _gvn.transform(obj_size);
4381
4382  // Exclude the header but include the array length, to copy in 8-byte words.
4383  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
4384  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
4385                            instanceOopDesc::base_offset_in_bytes();
4386  // base_off:
4387  // 8  - 32-bit VM
4388  // 12 - 64-bit VM, compressed klass
4389  // 16 - 64-bit VM, normal klass
4390  if (base_off % BytesPerLong != 0) {
4391    assert(UseCompressedClassPointers, "");
4392    if (is_array) {
4393      // Exclude the length so we can copy in 8-byte words.
4394      base_off += sizeof(int);
4395    } else {
4396      // Include the klass so we can copy in 8-byte words.
4397      base_off = instanceOopDesc::klass_offset_in_bytes();
4398    }
4399    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
4400  }
4401  src  = basic_plus_adr(src,  base_off);
4402  dest = basic_plus_adr(dest, base_off);
4403
4404  // Compute the copy length in 8-byte words, if needed:
4405  Node* countx = size;
4406  countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off)));
4407  countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) ));
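  // Editor's worked example of the size math above (assumes a 64-bit VM
  // with compressed class pointers, so instance base_off starts at 12):
  //
  //   instance: base_off -> klass_offset_in_bytes() == 8; an object of
  //             size 24 then copies (24 - 8) >> 3 == 2 eight-byte words.
  //             Re-copying the klass word is harmless: the clone was
  //             allocated with the same klass.
  //   array:    base_off == length_offset_in_bytes() == 12, bumped by
  //             sizeof(int) to 16 so the copy starts past the length field.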
4408
4409  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4410  bool disjoint_bases = true;
4411  generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
4412                               src, NULL, dest, NULL, countx,
4413                               /*dest_uninitialized*/true);
4414
4415  // If necessary, emit some card marks afterwards.  (Non-arrays only.)
4416  if (card_mark) {
4417    assert(!is_array, "");
4418    // Put in store barrier for any and all oops we are sticking
4419    // into this object.  (We could avoid this if we could prove
4420    // that the object type contains no oop fields at all.)
4421    Node* no_particular_value = NULL;
4422    Node* no_particular_field = NULL;
4423    int raw_adr_idx = Compile::AliasIdxRaw;
4424    post_barrier(control(),
4425                 memory(raw_adr_type),
4426                 alloc_obj,
4427                 no_particular_field,
4428                 raw_adr_idx,
4429                 no_particular_value,
4430                 T_OBJECT,
4431                 false);
4432  }
4433
4434  // Do not let reads from the cloned object float above the arraycopy.
4435  if (alloc != NULL) {
4436    // Do not let stores that initialize this object be reordered with
4437    // a subsequent store that would make this object accessible by
4438    // other threads.
4439    // Record what AllocateNode this StoreStore protects so that
4440    // escape analysis can go from the MemBarStoreStoreNode to the
4441    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
4442    // based on the escape status of the AllocateNode.
4443    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
4444  } else {
4445    insert_mem_bar(Op_MemBarCPUOrder);
4446  }
4447}
4448
4449//------------------------inline_native_clone----------------------------
4450// protected native Object java.lang.Object.clone();
4451//
4452// Here are the simple edge cases:
4453//  null receiver => normal trap
4454//  virtual and clone was overridden => slow path to out-of-line clone
4455//  not cloneable or finalizer => slow path to out-of-line Object.clone
4456//
4457// The general case has two steps, allocation and copying.
4458// Allocation has two cases, and uses GraphKit::new_instance or new_array.
4459//
4460// Copying also has two cases, oop arrays and everything else.
4461// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
4462// Everything else uses the tight inline loop supplied by CopyArrayNode.
4463//
4464// These steps fold up nicely if and when the cloned object's klass
4465// can be sharply typed as an object array, a type array, or an instance.
4466//
4467bool LibraryCallKit::inline_native_clone(bool is_virtual) {
4468  PhiNode* result_val;
4469
4470  // Set the reexecute bit for the interpreter to reexecute
4471  // the bytecode that invokes Object.clone if deoptimization happens.
4472  { PreserveReexecuteState preexecs(this);
4473    jvms()->set_should_reexecute(true);
4474
4475    Node* obj = null_check_receiver();
4476    if (stopped())  return true;
4477
4478    Node* obj_klass = load_object_klass(obj);
4479    const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4480    const TypeOopPtr*   toop   = ((tklass != NULL)
4481                                ? tklass->as_instance_type()
4482                                : TypeInstPtr::NOTNULL);
4483
4484    // Conservatively insert a memory barrier on all memory slices.
4485    // Do not let writes into the original float below the clone.
4486    insert_mem_bar(Op_MemBarCPUOrder);
4487
4488    // paths into result_reg:
4489    enum {
4490      _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4491      _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4492      _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4493      _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4494      PATH_LIMIT
4495    };
4496    RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT);
4497    result_val             = new(C) PhiNode(result_reg,
4498                                            TypeInstPtr::NOTNULL);
4499    PhiNode*    result_i_o = new(C) PhiNode(result_reg, Type::ABIO);
4500    PhiNode*    result_mem = new(C) PhiNode(result_reg, Type::MEMORY,
4501                                            TypePtr::BOTTOM);
4502    record_for_igvn(result_reg);
4503
4504    const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4505    int raw_adr_idx = Compile::AliasIdxRaw;
4506
4507    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4508    if (array_ctl != NULL) {
4509      // It's an array.
4510      PreserveJVMState pjvms(this);
4511      set_control(array_ctl);
4512      Node* obj_length = load_array_length(obj);
4513      Node* obj_size  = NULL;
4514      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
4515
4516      if (!use_ReduceInitialCardMarks()) {
4517        // If it is an oop array, it requires very special treatment,
4518        // because card marking is required on each card of the array.
4519        Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4520        if (is_obja != NULL) {
4521          PreserveJVMState pjvms2(this);
4522          set_control(is_obja);
4523          // Generate a direct call to the right arraycopy function(s).
4524          bool disjoint_bases = true;
4525          bool length_never_negative = true;
4526          generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4527                             obj, intcon(0), alloc_obj, intcon(0),
4528                             obj_length,
4529                             disjoint_bases, length_never_negative);
4530          result_reg->init_req(_objArray_path, control());
4531          result_val->init_req(_objArray_path, alloc_obj);
4532          result_i_o ->set_req(_objArray_path, i_o());
4533          result_mem ->set_req(_objArray_path, reset_memory());
4534        }
4535      }
4536      // Otherwise, there are no card marks to worry about.
4537      // (We can dispense with card marks if we know the allocation
4538      //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4539      //  causes the non-eden paths to take compensating steps to
4540      //  simulate a fresh allocation, so that no further
4541      //  card marks are required in compiled code to initialize
4542      //  the object.)
4543
4544      if (!stopped()) {
4545        copy_to_clone(obj, alloc_obj, obj_size, true, false);
4546
4547        // Present the results of the copy.
4548        result_reg->init_req(_array_path, control());
4549        result_val->init_req(_array_path, alloc_obj);
4550        result_i_o ->set_req(_array_path, i_o());
4551        result_mem ->set_req(_array_path, reset_memory());
4552      }
4553    }
4554
4555    // We only go to the instance fast case code if we pass a number of guards.
4556    // The paths which do not pass are accumulated in the slow_region.
4557    RegionNode* slow_region = new (C) RegionNode(1);
4558    record_for_igvn(slow_region);
4559    if (!stopped()) {
4560      // It's an instance (we did array above).  Make the slow-path tests.
4561      // If this is a virtual call, we generate a funny guard.  We grab
4562      // the vtable entry corresponding to clone() from the target object.
4563      // If the target method which we are calling happens to be the
4564      // Object clone() method, we pass the guard.  We do not need this
4565      // guard for non-virtual calls; the caller is known to be the native
4566      // Object clone().
4567      if (is_virtual) {
4568        generate_virtual_guard(obj_klass, slow_region);
4569      }
4570
4571      // The object must be cloneable and must not have a finalizer.
4572      // Both of these conditions may be checked in a single test.
4573      // We could optimize the cloneable test further, but we don't care.
4574      generate_access_flags_guard(obj_klass,
4575                                  // Test both conditions:
4576                                  JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
4577                                  // Must be cloneable but not finalizer:
4578                                  JVM_ACC_IS_CLONEABLE,
4579                                  slow_region);
4580    }
4581
4582    if (!stopped()) {
4583      // It's an instance, and it passed the slow-path tests.
4584      PreserveJVMState pjvms(this);
4585      Node* obj_size  = NULL;
4586      Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
4587
4588      copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
4589
4590      // Present the results of the slow call.
4591      result_reg->init_req(_instance_path, control());
4592      result_val->init_req(_instance_path, alloc_obj);
4593      result_i_o ->set_req(_instance_path, i_o());
4594      result_mem ->set_req(_instance_path, reset_memory());
4595    }
4596
4597    // Generate code for the slow case.  We make a call to clone().
4598    set_control(_gvn.transform(slow_region));
4599    if (!stopped()) {
4600      PreserveJVMState pjvms(this);
4601      CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4602      Node* slow_result = set_results_for_java_call(slow_call);
4603      // this->control() comes from set_results_for_java_call
4604      result_reg->init_req(_slow_path, control());
4605      result_val->init_req(_slow_path, slow_result);
4606      result_i_o ->set_req(_slow_path, i_o());
4607      result_mem ->set_req(_slow_path, reset_memory());
4608    }
4609
4610    // Return the combined state.
4611    set_control(    _gvn.transform(result_reg));
4612    set_i_o(        _gvn.transform(result_i_o));
4613    set_all_memory( _gvn.transform(result_mem));
4614  } // original reexecute is set back here
4615
4616  set_result(_gvn.transform(result_val));
4617  return true;
4618}
4619
4620//------------------------------basictype2arraycopy----------------------------
4621address LibraryCallKit::basictype2arraycopy(BasicType t,
4622                                            Node* src_offset,
4623                                            Node* dest_offset,
4624                                            bool disjoint_bases,
4625                                            const char* &name,
4626                                            bool dest_uninitialized) {
4627  const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);
4628  const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);
4629
4630  bool aligned = false;
4631  bool disjoint = disjoint_bases;
4632
4633  // if the offsets are the same, we can treat the memory regions as
4634  // disjoint, because either the memory regions are in different arrays,
4635  // or they are identical (which we can treat as disjoint.)  We can also
4636  // treat a copy with a destination index less than the source index
4637  // as disjoint since a low->high copy will work correctly in this case.
4638  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
4639      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
4640    // both indices are constants
4641    int s_offs = src_offset_inttype->get_con();
4642    int d_offs = dest_offset_inttype->get_con();
4643    int element_size = type2aelembytes(t);
4644    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
4645              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
4646    if (s_offs >= d_offs)  disjoint = true;
4647  } else if (src_offset == dest_offset && src_offset != NULL) {
4648    // This can occur if the offsets are identical non-constants.
4649    disjoint = true;
4650  }
4651
4652  return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized);
4653}
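// Editor's example of the disjointness reasoning above: for
//
//   System.arraycopy(a, 4, a, 1, n);   // constant offsets, same array
//
// s_offs (4) >= d_offs (1), so a forward low->high copy never reads an
// element it has already overwritten; the disjoint stub is safe even though
// the ranges overlap.  Shifting elements the other way (d_offs > s_offs)
// must keep the conjoint stub.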
4654
4655
4656//------------------------------inline_arraycopy-----------------------
4657// public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
4658//                                                      Object dest, int destPos,
4659//                                                      int length);
4660bool LibraryCallKit::inline_arraycopy() {
4661  // Get the arguments.
4662  Node* src         = argument(0);  // type: oop
4663  Node* src_offset  = argument(1);  // type: int
4664  Node* dest        = argument(2);  // type: oop
4665  Node* dest_offset = argument(3);  // type: int
4666  Node* length      = argument(4);  // type: int
4667
4668  // Compile time checks.  If any of these checks cannot be verified at compile time,
4669  // we do not make a fast path for this call.  Instead, we let the call remain as it
4670  // is.  The checks we choose to mandate at compile time are:
4671  //
4672  // (1) src and dest are arrays.
4673  const Type* src_type  = src->Value(&_gvn);
4674  const Type* dest_type = dest->Value(&_gvn);
4675  const TypeAryPtr* top_src  = src_type->isa_aryptr();
4676  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4677
4678  // Do we have the type of src?
4679  bool has_src = (top_src != NULL && top_src->klass() != NULL);
4680  // Do we have the type of dest?
4681  bool has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4682  // Is the type for src from speculation?
4683  bool src_spec = false;
4684  // Is the type for dest from speculation?
4685  bool dest_spec = false;
4686
4687  if (!has_src || !has_dest) {
4688    // We don't have sufficient type information, let's see if
4689    // speculative types can help. We need to have types for both src
4690    // and dest so that it pays off.
4691
4692    // Do we already have or could we have type information for src
4693    bool could_have_src = has_src;
4694    // Do we already have or could we have type information for dest
4695    bool could_have_dest = has_dest;
4696
4697    ciKlass* src_k = NULL;
4698    if (!has_src) {
4699      src_k = src_type->speculative_type_not_null();
4700      if (src_k != NULL && src_k->is_array_klass()) {
4701        could_have_src = true;
4702      }
4703    }
4704
4705    ciKlass* dest_k = NULL;
4706    if (!has_dest) {
4707      dest_k = dest_type->speculative_type_not_null();
4708      if (dest_k != NULL && dest_k->is_array_klass()) {
4709        could_have_dest = true;
4710      }
4711    }
4712
4713    if (could_have_src && could_have_dest) {
4714      // This is going to pay off so emit the required guards
4715      if (!has_src) {
4716        src = maybe_cast_profiled_obj(src, src_k);
4717        src_type  = _gvn.type(src);
4718        top_src  = src_type->isa_aryptr();
4719        has_src = (top_src != NULL && top_src->klass() != NULL);
4720        src_spec = true;
4721      }
4722      if (!has_dest) {
4723        dest = maybe_cast_profiled_obj(dest, dest_k);
4724        dest_type  = _gvn.type(dest);
4725        top_dest  = dest_type->isa_aryptr();
4726        has_dest = (top_dest != NULL && top_dest->klass() != NULL);
4727        dest_spec = true;
4728      }
4729    }
4730  }
4731
4732  if (!has_src || !has_dest) {
4733    // Conservatively insert a memory barrier on all memory slices.
4734    // Do not let writes into the source float below the arraycopy.
4735    insert_mem_bar(Op_MemBarCPUOrder);
4736
4737    // Call StubRoutines::generic_arraycopy stub.
4738    generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
4739                       src, src_offset, dest, dest_offset, length);
4740
4741    // Do not let reads from the destination float above the arraycopy.
4742    // Since we cannot type the arrays, we don't know which slices
4743    // might be affected.  We could restrict this barrier only to those
4744    // memory slices which pertain to array elements--but don't bother.
4745    if (!InsertMemBarAfterArraycopy)
4746      // (If InsertMemBarAfterArraycopy, there is already one in place.)
4747      insert_mem_bar(Op_MemBarCPUOrder);
4748    return true;
4749  }
4750
4751  // (2) src and dest arrays must have elements of the same BasicType
4752  // Figure out the size and type of the elements we will be copying.
4753  BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
4754  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4755  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4756  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4757
4758  if (src_elem != dest_elem || dest_elem == T_VOID) {
4759    // The component types are not the same or are not recognized.  Punt.
4760    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
4761    generate_slow_arraycopy(TypePtr::BOTTOM,
4762                            src, src_offset, dest, dest_offset, length,
4763                            /*dest_uninitialized*/false);
4764    return true;
4765  }
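  // Editor's examples for the element-type screen above:
  //
  //   System.arraycopy(new int[8], 0, new long[8], 0, 4)
  //     -> T_INT vs. T_LONG: punt to the slow call, which will throw
  //        ArrayStoreException as the API requires.
  //   System.arraycopy(new String[8], 0, new Object[8], 0, 4)
  //     -> both T_OBJECT: proceed; per-element assignability is left to
  //        check (9) below.
  //
  // Since T_ARRAY folds to T_OBJECT first, Object[][] -> Object[] also
  // passes this screen.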
4766
4767  if (src_elem == T_OBJECT) {
4768    // If both arrays are object arrays then having the exact types
4769    // for both will remove the need for a subtype check at runtime
4770    // before the call and may make it possible to pick a faster copy
4771    // routine (without a subtype check on every element)
4772    // Do we have the exact type of src?
4773    bool could_have_src = src_spec;
4774    // Do we have the exact type of dest?
4775    bool could_have_dest = dest_spec;
4776    ciKlass* src_k = top_src->klass();
4777    ciKlass* dest_k = top_dest->klass();
4778    if (!src_spec) {
4779      src_k = src_type->speculative_type_not_null();
4780      if (src_k != NULL && src_k->is_array_klass()) {
4781          could_have_src = true;
4782      }
4783    }
4784    if (!dest_spec) {
4785      dest_k = dest_type->speculative_type_not_null();
4786      if (dest_k != NULL && dest_k->is_array_klass()) {
4787        could_have_dest = true;
4788      }
4789    }
4790    if (could_have_src && could_have_dest) {
4791      // If we can have both exact types, emit the missing guards
4792      if (could_have_src && !src_spec) {
4793        src = maybe_cast_profiled_obj(src, src_k);
4794      }
4795      if (could_have_dest && !dest_spec) {
4796        dest = maybe_cast_profiled_obj(dest, dest_k);
4797      }
4798    }
4799  }
4800
4801  //---------------------------------------------------------------------------
4802  // We will make a fast path for this call to arraycopy.
4803
4804  // We have the following tests left to perform:
4805  //
4806  // (3) src and dest must not be null.
4807  // (4) src_offset must not be negative.
4808  // (5) dest_offset must not be negative.
4809  // (6) length must not be negative.
4810  // (7) src_offset + length must not exceed length of src.
4811  // (8) dest_offset + length must not exceed length of dest.
4812  // (9) each element of an oop array must be assignable
4813
4814  RegionNode* slow_region = new (C) RegionNode(1);
4815  record_for_igvn(slow_region);
4816
4817  // (3) operands must not be null
4818  // We currently perform our null checks with the null_check routine.
4819  // This means that the null exceptions will be reported in the caller
4820  // rather than (correctly) reported inside of the native arraycopy call.
4821  // This should be corrected, given time.  We do our null check with the
4822  // stack pointer restored.
4823  src  = null_check(src,  T_ARRAY);
4824  dest = null_check(dest, T_ARRAY);
4825
4826  // (4) src_offset must not be negative.
4827  generate_negative_guard(src_offset, slow_region);
4828
4829  // (5) dest_offset must not be negative.
4830  generate_negative_guard(dest_offset, slow_region);
4831
4832  // (6) length must not be negative (moved to generate_arraycopy()).
4833  // generate_negative_guard(length, slow_region);
4834
4835  // (7) src_offset + length must not exceed length of src.
4836  generate_limit_guard(src_offset, length,
4837                       load_array_length(src),
4838                       slow_region);
4839
4840  // (8) dest_offset + length must not exceed length of dest.
4841  generate_limit_guard(dest_offset, length,
4842                       load_array_length(dest),
4843                       slow_region);
4844
4845  // (9) each element of an oop array must be assignable
4846  // The generate_arraycopy subroutine checks this.
4847
4848  // This is where the memory effects are placed:
4849  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
4850  generate_arraycopy(adr_type, dest_elem,
4851                     src, src_offset, dest, dest_offset, length,
4852                     false, false, slow_region);
4853
4854  return true;
4855}
4856
4857//-----------------------------generate_arraycopy----------------------
4858// Generate an optimized call to arraycopy.
4859// Caller must guard against non-arrays.
4860// Caller must determine a common array basic-type for both arrays.
4861// Caller must validate offsets against array bounds.
4862// The slow_region has already collected guard failure paths
4863// (such as out of bounds length or non-conformable array types).
4864// The generated code has this shape, in general:
4865//
4866//     if (length == 0)  return   // via zero_path
4867//     slowval = -1
4868//     if (types unknown) {
4869//       slowval = call generic copy loop
4870//       if (slowval == 0)  return  // via checked_path
4871//     } else if (indexes in bounds) {
4872//       if ((is object array) && !(array type check)) {
4873//         slowval = call checked copy loop
4874//         if (slowval == 0)  return  // via checked_path
4875//       } else {
4876//         call bulk copy loop
4877//         return  // via fast_path
4878//       }
4879//     }
4880//     // adjust params for remaining work:
4881//     if (slowval != -1) {
4882//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
4883//     }
4884//   slow_region:
4885//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
4886//     return  // via slow_call_path
4887//
4888// This routine is used from several intrinsics:  System.arraycopy,
4889// Object.clone (the array subcase), and Arrays.copyOf[Range].
4890//
4891void
4892LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
4893                                   BasicType basic_elem_type,
4894                                   Node* src,  Node* src_offset,
4895                                   Node* dest, Node* dest_offset,
4896                                   Node* copy_length,
4897                                   bool disjoint_bases,
4898                                   bool length_never_negative,
4899                                   RegionNode* slow_region) {
4900
4901  if (slow_region == NULL) {
4902    slow_region = new(C) RegionNode(1);
4903    record_for_igvn(slow_region);
4904  }
4905
4906  Node* original_dest      = dest;
4907  AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
4908  bool  dest_uninitialized = false;
4909
4910  // See if this is the initialization of a newly-allocated array.
4911  // If so, we will take responsibility here for initializing it to zero.
4912  // (Note:  Because tightly_coupled_allocation performs checks on the
4913  // out-edges of the dest, we need to avoid making derived pointers
4914  // from it until we have checked its uses.)
4915  if (ReduceBulkZeroing
4916      && !ZeroTLAB              // pointless if already zeroed
4917      && basic_elem_type != T_CONFLICT // avoid corner case
4918      && !src->eqv_uncast(dest)
4919      && ((alloc = tightly_coupled_allocation(dest, slow_region))
4920          != NULL)
4921      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
4922      && alloc->maybe_set_complete(&_gvn)) {
4923    // "You break it, you buy it."
4924    InitializeNode* init = alloc->initialization();
4925    assert(init->is_complete(), "we just did this");
4926    init->set_complete_with_arraycopy();
4927    assert(dest->is_CheckCastPP(), "sanity");
4928    assert(dest->in(0)->in(0) == init, "dest pinned");
4929    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
4930    // From this point on, every exit path is responsible for
4931    // initializing any non-copied parts of the object to zero.
4932    // Also, if this flag is set we make sure that arraycopy interacts properly
4933    // with G1, eliding pre-barriers. See CR 6627983.
4934    dest_uninitialized = true;
4935  } else {
4936    // No zeroing elimination here.
4937    alloc             = NULL;
4938    //original_dest   = dest;
4939    //dest_uninitialized = false;
4940  }
4941
4942  // Results are placed here:
4943  enum { fast_path        = 1,  // normal void-returning assembly stub
4944         checked_path     = 2,  // special assembly stub with cleanup
4945         slow_call_path   = 3,  // something went wrong; call the VM
4946         zero_path        = 4,  // bypass when length of copy is zero
4947         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
4948         PATH_LIMIT       = 6
4949  };
4950  RegionNode* result_region = new(C) RegionNode(PATH_LIMIT);
4951  PhiNode*    result_i_o    = new(C) PhiNode(result_region, Type::ABIO);
4952  PhiNode*    result_memory = new(C) PhiNode(result_region, Type::MEMORY, adr_type);
4953  record_for_igvn(result_region);
4954  _gvn.set_type_bottom(result_i_o);
4955  _gvn.set_type_bottom(result_memory);
4956  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
4957
4958  // The slow_control path:
4959  Node* slow_control;
4960  Node* slow_i_o = i_o();
4961  Node* slow_mem = memory(adr_type);
4962  debug_only(slow_control = (Node*) badAddress);
4963
4964  // Checked control path:
4965  Node* checked_control = top();
4966  Node* checked_mem     = NULL;
4967  Node* checked_i_o     = NULL;
4968  Node* checked_value   = NULL;
4969
4970  if (basic_elem_type == T_CONFLICT) {
4971    assert(!dest_uninitialized, "");
4972    Node* cv = generate_generic_arraycopy(adr_type,
4973                                          src, src_offset, dest, dest_offset,
4974                                          copy_length, dest_uninitialized);
4975    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4976    checked_control = control();
4977    checked_i_o     = i_o();
4978    checked_mem     = memory(adr_type);
4979    checked_value   = cv;
4980    set_control(top());         // no fast path
4981  }
4982
4983  Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
4984  if (not_pos != NULL) {
4985    PreserveJVMState pjvms(this);
4986    set_control(not_pos);
4987
4988    // (6) length must not be negative.
4989    if (!length_never_negative) {
4990      generate_negative_guard(copy_length, slow_region);
4991    }
4992
4993    // copy_length is 0.
4994    if (!stopped() && dest_uninitialized) {
4995      Node* dest_length = alloc->in(AllocateNode::ALength);
4996      if (copy_length->eqv_uncast(dest_length)
4997          || _gvn.find_int_con(dest_length, 1) <= 0) {
4998        // There is no zeroing to do. No need for a secondary raw memory barrier.
4999      } else {
5000        // Clear the whole thing since there are no source elements to copy.
5001        generate_clear_array(adr_type, dest, basic_elem_type,
5002                             intcon(0), NULL,
5003                             alloc->in(AllocateNode::AllocSize));
5004        // Use a secondary InitializeNode as raw memory barrier.
5005        // Currently it is needed only on this path since other
5006        // paths have stub or runtime calls as raw memory barriers.
5007        InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
5008                                                       Compile::AliasIdxRaw,
5009                                                       top())->as_Initialize();
5010        init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
5011      }
5012    }
5013
5014    // Present the results of the fast call.
5015    result_region->init_req(zero_path, control());
5016    result_i_o   ->init_req(zero_path, i_o());
5017    result_memory->init_req(zero_path, memory(adr_type));
5018  }
5019
5020  if (!stopped() && dest_uninitialized) {
5021    // We have to initialize the *uncopied* part of the array to zero.
5022    // The copy destination is the slice dest[off..off+len].  The other slices
5023    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
5024    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
5025    Node* dest_length = alloc->in(AllocateNode::ALength);
5026    Node* dest_tail   = _gvn.transform(new(C) AddINode(dest_offset,
5027                                                          copy_length));
5028
5029    // If there is a head section that needs zeroing, do it now.
5030    if (find_int_con(dest_offset, -1) != 0) {
5031      generate_clear_array(adr_type, dest, basic_elem_type,
5032                           intcon(0), dest_offset,
5033                           NULL);
5034    }
5035
5036    // Next, perform a dynamic check on the tail length.
5037    // It is often zero, and we can win big if we prove this.
5038    // There are two wins:  Avoid generating the ClearArray
5039    // with its attendant messy index arithmetic, and upgrade
5040    // the copy to a more hardware-friendly word size of 64 bits.
5041    Node* tail_ctl = NULL;
5042    if (!stopped() && !dest_tail->eqv_uncast(dest_length)) {
5043      Node* cmp_lt   = _gvn.transform(new(C) CmpINode(dest_tail, dest_length));
5044      Node* bol_lt   = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt));
5045      tail_ctl = generate_slow_guard(bol_lt, NULL);
5046      assert(tail_ctl != NULL || !stopped(), "must be an outcome");
5047    }
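
    // Worked example (illustrative values): for dest = new T[10] and a copy
    // into dest[2..10), dest_tail = 2 + 8 equals dest_length, the eqv_uncast
    // test above folds the comparison away, and no tail path is generated.
    // For a copy into dest[2..7), dest_tail = 7 < 10, and tail_ctl covers
    // clearing the slice dest[7..10).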
5048
5049    // At this point, let's assume there is no tail.
5050    if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
5051      // There is no tail.  Try an upgrade to a 64-bit copy.
5052      bool didit = false;
5053      { PreserveJVMState pjvms(this);
5054        didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
5055                                         src, src_offset, dest, dest_offset,
5056                                         dest_size, dest_uninitialized);
5057        if (didit) {
5058          // Present the results of the block-copying fast call.
5059          result_region->init_req(bcopy_path, control());
5060          result_i_o   ->init_req(bcopy_path, i_o());
5061          result_memory->init_req(bcopy_path, memory(adr_type));
5062        }
5063      }
5064      if (didit)
5065        set_control(top());     // no regular fast path
5066    }
5067
5068    // Clear the tail, if any.
5069    if (tail_ctl != NULL) {
5070      Node* notail_ctl = stopped() ? NULL : control();
5071      set_control(tail_ctl);
5072      if (notail_ctl == NULL) {
5073        generate_clear_array(adr_type, dest, basic_elem_type,
5074                             dest_tail, NULL,
5075                             dest_size);
5076      } else {
5077        // Make a local merge.
5078        Node* done_ctl = new(C) RegionNode(3);
5079        Node* done_mem = new(C) PhiNode(done_ctl, Type::MEMORY, adr_type);
5080        done_ctl->init_req(1, notail_ctl);
5081        done_mem->init_req(1, memory(adr_type));
5082        generate_clear_array(adr_type, dest, basic_elem_type,
5083                             dest_tail, NULL,
5084                             dest_size);
5085        done_ctl->init_req(2, control());
5086        done_mem->init_req(2, memory(adr_type));
5087        set_control( _gvn.transform(done_ctl));
5088        set_memory(  _gvn.transform(done_mem), adr_type );
5089      }
5090    }
5091  }
5092
5093  BasicType copy_type = basic_elem_type;
5094  assert(basic_elem_type != T_ARRAY, "caller must fix this");
5095  if (!stopped() && copy_type == T_OBJECT) {
5096    // If src and dest have compatible element types, we can copy bits.
5097    // Types S[] and D[] are compatible if D is a supertype of S.
5098    //
5099    // If they are not, we will use checked_oop_disjoint_arraycopy,
5100    // which performs a fast optimistic per-oop check, and backs off
5101    // further to JVM_ArrayCopy on the first per-oop check that fails.
5102    // (Actually, we never copy only the raw bits; the GC also requires card marks.)
5103
5104    // Get the Klass* for both src and dest
5105    Node* src_klass  = load_object_klass(src);
5106    Node* dest_klass = load_object_klass(dest);
5107
5108    // Generate the subtype check.
5109    // This might fold up statically, or then again it might not.
5110    //
5111    // Non-static example:  Copying List<String>.elements to a new String[].
5112    // The backing store for a List<String> is always an Object[],
5113    // but its elements are always type String, if the generic types
5114    // are correct at the source level.
5115    //
5116    // Test S[] against D[], not S against D, because (probably)
5117    // the secondary supertype cache is less busy for S[] than S.
5118    // This usually only matters when D is an interface.
5119    Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
5120    // Plug failing path into checked_oop_disjoint_arraycopy
5121    if (not_subtype_ctrl != top()) {
5122      PreserveJVMState pjvms(this);
5123      set_control(not_subtype_ctrl);
5124      // (At this point we can assume disjoint_bases, since types differ.)
5125      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
5126      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
5127      Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
5128      Node* dest_elem_klass = _gvn.transform(n1);
5129      Node* cv = generate_checkcast_arraycopy(adr_type,
5130                                              dest_elem_klass,
5131                                              src, src_offset, dest, dest_offset,
5132                                              ConvI2X(copy_length), dest_uninitialized);
5133      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
5134      checked_control = control();
5135      checked_i_o     = i_o();
5136      checked_mem     = memory(adr_type);
5137      checked_value   = cv;
5138    }
5139    // At this point we know we do not need type checks on oop stores.
5140
5141    // Let's see if we need card marks:
5142    if (alloc != NULL && use_ReduceInitialCardMarks()) {
5143      // If we do not need card marks, copy using the jint or jlong stub.
5144      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
5145      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
5146             "sizes agree");
5147    }
5148  }
5149
5150  if (!stopped()) {
5151    // Generate the fast path, if possible.
5152    PreserveJVMState pjvms(this);
5153    generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
5154                                 src, src_offset, dest, dest_offset,
5155                                 ConvI2X(copy_length), dest_uninitialized);
5156
5157    // Present the results of the fast call.
5158    result_region->init_req(fast_path, control());
5159    result_i_o   ->init_req(fast_path, i_o());
5160    result_memory->init_req(fast_path, memory(adr_type));
5161  }
5162
5163  // Here are all the slow paths up to this point, in one bundle:
5164  slow_control = top();
5165  if (slow_region != NULL)
5166    slow_control = _gvn.transform(slow_region);
5167  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
5168
5169  set_control(checked_control);
5170  if (!stopped()) {
5171    // Clean up after the checked call.
5172    // The returned value is either 0 or -1^K,
5173    // where K = number of partially transferred array elements.
5174    Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0)));
5175    Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq));
5176    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
5177
5178    // If it is 0, we are done, so transfer to the end.
5179    Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff));
5180    result_region->init_req(checked_path, checks_done);
5181    result_i_o   ->init_req(checked_path, checked_i_o);
5182    result_memory->init_req(checked_path, checked_mem);
5183
5184    // If it is not zero, merge into the slow call.
5185    set_control( _gvn.transform(new(C) IfFalseNode(iff) ));
5186    RegionNode* slow_reg2 = new(C) RegionNode(3);
5187    PhiNode*    slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO);
5188    PhiNode*    slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type);
5189    record_for_igvn(slow_reg2);
5190    slow_reg2  ->init_req(1, slow_control);
5191    slow_i_o2  ->init_req(1, slow_i_o);
5192    slow_mem2  ->init_req(1, slow_mem);
5193    slow_reg2  ->init_req(2, control());
5194    slow_i_o2  ->init_req(2, checked_i_o);
5195    slow_mem2  ->init_req(2, checked_mem);
5196
5197    slow_control = _gvn.transform(slow_reg2);
5198    slow_i_o     = _gvn.transform(slow_i_o2);
5199    slow_mem     = _gvn.transform(slow_mem2);
5200
5201    if (alloc != NULL) {
5202      // We'll restart from the very beginning, after zeroing the whole thing.
5203      // This can cause double writes, but that's OK since dest is brand new.
5204      // So we ignore the low 31 bits of the value returned from the stub.
5205    } else {
5206      // We must continue the copy exactly where it failed, or else
5207      // another thread might see the wrong number of writes to dest.
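      // Worked example (illustrative): if the stub copied K = 3 elements and
      // then failed, checked_value is -1^3 == ~3 == -4; the XOR with -1 below
      // recovers 3, so the slow path resumes at src_offset+3 / dest_offset+3
      // with copy_length-3 elements remaining.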
5208      Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1)));
5209      Node* slow_offset    = new(C) PhiNode(slow_reg2, TypeInt::INT);
5210      slow_offset->init_req(1, intcon(0));
5211      slow_offset->init_req(2, checked_offset);
5212      slow_offset  = _gvn.transform(slow_offset);
5213
5214      // Adjust the arguments by the conditionally incoming offset.
5215      Node* src_off_plus  = _gvn.transform(new(C) AddINode(src_offset,  slow_offset));
5216      Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset));
5217      Node* length_minus  = _gvn.transform(new(C) SubINode(copy_length, slow_offset));
5218
5219      // Tweak the node variables to adjust the code produced below:
5220      src_offset  = src_off_plus;
5221      dest_offset = dest_off_plus;
5222      copy_length = length_minus;
5223    }
5224  }
5225
5226  set_control(slow_control);
5227  if (!stopped()) {
5228    // Generate the slow path, if needed.
5229    PreserveJVMState pjvms(this);   // replace_in_map may trash the map
5230
5231    set_memory(slow_mem, adr_type);
5232    set_i_o(slow_i_o);
5233
5234    if (dest_uninitialized) {
5235      generate_clear_array(adr_type, dest, basic_elem_type,
5236                           intcon(0), NULL,
5237                           alloc->in(AllocateNode::AllocSize));
5238    }
5239
5240    generate_slow_arraycopy(adr_type,
5241                            src, src_offset, dest, dest_offset,
5242                            copy_length, /*dest_uninitialized*/false);
5243
5244    result_region->init_req(slow_call_path, control());
5245    result_i_o   ->init_req(slow_call_path, i_o());
5246    result_memory->init_req(slow_call_path, memory(adr_type));
5247  }
5248
5249  // Remove unused edges.
5250  for (uint i = 1; i < result_region->req(); i++) {
5251    if (result_region->in(i) == NULL)
5252      result_region->init_req(i, top());
5253  }
5254
5255  // Finished; return the combined state.
5256  set_control( _gvn.transform(result_region));
5257  set_i_o(     _gvn.transform(result_i_o)    );
5258  set_memory(  _gvn.transform(result_memory), adr_type );
5259
5260  // The memory edges above are precise in order to model effects around
5261  // array copies accurately to allow value numbering of field loads around
5262  // arraycopy.  Such field loads, both before and after, are common in Java
5263  // collections and similar classes involving header/array data structures.
5264  //
5265  // But with a low number of registers, or when registers are used or killed
5266  // by arraycopy calls, this causes register spilling on the stack. See 6544710.
5267  // The next memory barrier is added to avoid it. If the arraycopy can be
5268  // optimized away (which it can, sometimes) then we can manually remove
5269  // the membar also.
5270  //
5271  // Do not let reads from the cloned object float above the arraycopy.
5272  if (alloc != NULL) {
5273    // Do not let stores that initialize this object be reordered with
5274    // a subsequent store that would make this object accessible by
5275    // other threads.
5276    // Record what AllocateNode this StoreStore protects so that
5277    // escape analysis can go from the MemBarStoreStoreNode to the
5278    // AllocateNode and eliminate the MemBarStoreStoreNode if possible
5279    // based on the escape status of the AllocateNode.
5280    insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out(AllocateNode::RawAddress));
5281  } else if (InsertMemBarAfterArraycopy)
5282    insert_mem_bar(Op_MemBarCPUOrder);
5283}
5284
5285
5286// Helper function which determines if an arraycopy immediately follows
5287// an allocation, with no intervening tests or other escapes for the object.
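// A typical shape this recognizes (illustrative Java source, not from this file):
//
//   int[] dest = new int[n];              // becomes an AllocateArrayNode
//   System.arraycopy(src, 0, dest, 0, k); // this arraycopy intrinsic
//
// with nothing between the allocation and the copy that could observe 'dest'.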
5288AllocateArrayNode*
5289LibraryCallKit::tightly_coupled_allocation(Node* ptr,
5290                                           RegionNode* slow_region) {
5291  if (stopped())             return NULL;  // no fast path
5292  if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
5293
5294  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
5295  if (alloc == NULL)  return NULL;
5296
5297  Node* rawmem = memory(Compile::AliasIdxRaw);
5298  // Is the allocation's memory state untouched?
5299  if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
5300    // Bail out if there have been raw-memory effects since the allocation.
5301    // (Example:  There might have been a call or safepoint.)
5302    return NULL;
5303  }
5304  rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
5305  if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
5306    return NULL;
5307  }
5308
5309  // There must be no unexpected observers of this allocation.
5310  for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
5311    Node* obs = ptr->fast_out(i);
5312    if (obs != this->map()) {
5313      return NULL;
5314    }
5315  }
5316
5317  // This arraycopy must unconditionally follow the allocation of the ptr.
5318  Node* alloc_ctl = ptr->in(0);
5319  assert(just_allocated_object(alloc_ctl) == ptr, "most recent alloc");
5320
5321  Node* ctl = control();
5322  while (ctl != alloc_ctl) {
5323    // There may be guards which feed into the slow_region.
5324    // Any other control flow means that we might not get a chance
5325    // to finish initializing the allocated object.
5326    if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
5327      IfNode* iff = ctl->in(0)->as_If();
5328      Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
5329      assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
5330      if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
5331        ctl = iff->in(0);       // This test feeds the known slow_region.
5332        continue;
5333      }
5334      // One more try:  Various low-level checks bottom out in
5335      // uncommon traps.  If the debug-info of the trap omits
5336      // any reference to the allocation, as we've already
5337      // observed, then there can be no objection to the trap.
5338      bool found_trap = false;
5339      for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
5340        Node* obs = not_ctl->fast_out(j);
5341        if (obs->in(0) == not_ctl && obs->is_Call() &&
5342            (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) {
5343          found_trap = true; break;
5344        }
5345      }
5346      if (found_trap) {
5347        ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
5348        continue;
5349      }
5350    }
5351    return NULL;
5352  }
5353
5354  // If we get this far, we have an allocation which immediately
5355  // precedes the arraycopy, and we can take over zeroing the new object.
5356  // The arraycopy will finish the initialization, and provide
5357  // a new control state to which we will anchor the destination pointer.
5358
5359  return alloc;
5360}
5361
5362// Helper for initialization of arrays, creating a ClearArray.
5363// It writes zero bits in [start..end), within the body of an array object.
5364// The memory effects are all chained onto the 'adr_type' alias category.
5365//
5366// Since the object is otherwise uninitialized, we are free
5367// to put a little "slop" around the edges of the cleared area,
5368// as long as it does not go back into the array's header,
5369// or beyond the array end within the heap.
5370//
5371// The lower edge can be rounded down to the nearest jint and the
5372// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
5373//
5374// Arguments:
5375//   adr_type           memory slice where writes are generated
5376//   dest               oop of the destination array
5377//   basic_elem_type    element type of the destination
5378//   slice_idx          array index of first element to store
5379//   slice_len          number of elements to store (or NULL)
5380//   dest_size          total size in bytes of the array object
5381//
5382// Exactly one of slice_len or dest_size must be non-NULL.
5383// If dest_size is non-NULL, zeroing extends to the end of the object.
5384// If slice_len is non-NULL, the slice_idx value must be a constant.
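//
// Illustrative uses, mirroring the calls made from the arraycopy path above:
//   generate_clear_array(adr_type, dest, elem, intcon(0), dest_offset, NULL)
// zeroes the head slice dest[0..off), while
//   generate_clear_array(adr_type, dest, elem, dest_tail, NULL, dest_size)
// zeroes from dest[off+len) to the end of the object.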
5385void
5386LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
5387                                     Node* dest,
5388                                     BasicType basic_elem_type,
5389                                     Node* slice_idx,
5390                                     Node* slice_len,
5391                                     Node* dest_size) {
5392  // one or the other but not both of slice_len and dest_size:
5393  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
5394  if (slice_len == NULL)  slice_len = top();
5395  if (dest_size == NULL)  dest_size = top();
5396
5397  // operate on this memory slice:
5398  Node* mem = memory(adr_type); // memory slice to operate on
5399
5400  // scaling and rounding of indexes:
5401  int scale = exact_log2(type2aelembytes(basic_elem_type));
5402  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5403  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
5404  int bump_bit  = (-1 << scale) & BytesPerInt;
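
  // Worked example of the rounding masks (illustrative, T_SHORT => scale 1):
  //   clear_low = (-1 << 1) & (BytesPerInt - 1) == ...11110 & 3 == 2
  //   bump_bit  = (-1 << 1) & BytesPerInt       == ...11110 & 4 == 4
  // i.e. a short-aligned start may sit 2 bytes inside a jint (clear_low),
  // and may need one extra jint step (bump_bit) to reach jlong alignment.
  // For T_LONG (scale 3) both masks are zero and no rounding slop is needed.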
5405
5406  // determine constant starts and ends
5407  const intptr_t BIG_NEG = -128;
5408  assert(BIG_NEG + 2*abase < 0, "neg enough");
5409  intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
5410  intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
5411  if (slice_len_con == 0) {
5412    return;                     // nothing to do here
5413  }
5414  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
5415  intptr_t end_con   = find_intptr_t_con(dest_size, -1);
5416  if (slice_idx_con >= 0 && slice_len_con >= 0) {
5417    assert(end_con < 0, "not two cons");
5418    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
5419                       BytesPerLong);
5420  }
5421
5422  if (start_con >= 0 && end_con >= 0) {
5423    // Constant start and end.  Simple.
5424    mem = ClearArrayNode::clear_memory(control(), mem, dest,
5425                                       start_con, end_con, &_gvn);
5426  } else if (start_con >= 0 && dest_size != top()) {
5427    // Constant start, pre-rounded end after the tail of the array.
5428    Node* end = dest_size;
5429    mem = ClearArrayNode::clear_memory(control(), mem, dest,
5430                                       start_con, end, &_gvn);
5431  } else if (start_con >= 0 && slice_len != top()) {
5432    // Constant start, non-constant end.  End needs rounding up.
5433    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
5434    intptr_t end_base  = abase + (slice_idx_con << scale);
5435    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
5436    Node*    end       = ConvI2X(slice_len);
5437    if (scale != 0)
5438      end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale) ));
5439    end_base += end_round;
5440    end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base)));
5441    end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round)));
5442    mem = ClearArrayNode::clear_memory(control(), mem, dest,
5443                                       start_con, end, &_gvn);
5444  } else if (start_con < 0 && dest_size != top()) {
5445    // Non-constant start, pre-rounded end after the tail of the array.
5446    // This is almost certainly a "round-to-end" operation.
5447    Node* start = slice_idx;
5448    start = ConvI2X(start);
5449    if (scale != 0)
5450      start = _gvn.transform(new(C) LShiftXNode( start, intcon(scale) ));
5451    start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase)));
5452    if ((bump_bit | clear_low) != 0) {
5453      int to_clear = (bump_bit | clear_low);
5454      // Align up mod 8, then store a jint zero unconditionally
5455      // just before the mod-8 boundary.
5456      if (((abase + bump_bit) & ~to_clear) - bump_bit
5457          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
5458        bump_bit = 0;
5459        assert((abase & to_clear) == 0, "array base must be long-aligned");
5460      } else {
5461        // Bump 'start' up to (or past) the next jint boundary:
5462        start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit)));
5463        assert((abase & clear_low) == 0, "array base must be int-aligned");
5464      }
5465      // Round bumped 'start' down to jlong boundary in body of array.
5466      start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
5467      if (bump_bit != 0) {
5468        // Store a zero to the immediately preceding jint:
5469        Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
5470        Node* p1 = basic_plus_adr(dest, x1);
5471        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
5472        mem = _gvn.transform(mem);
5473      }
5474    }
5475    Node* end = dest_size; // pre-rounded
5476    mem = ClearArrayNode::clear_memory(control(), mem, dest,
5477                                       start, end, &_gvn);
5478  } else {
5479    // Non-constant start, unrounded non-constant end.
5480    // (Nobody zeroes a random midsection of an array using this routine.)
5481    ShouldNotReachHere();       // fix caller
5482  }
5483
5484  // Done.
5485  set_memory(mem, adr_type);
5486}
5487
5488
5489bool
5490LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
5491                                         BasicType basic_elem_type,
5492                                         AllocateNode* alloc,
5493                                         Node* src,  Node* src_offset,
5494                                         Node* dest, Node* dest_offset,
5495                                         Node* dest_size, bool dest_uninitialized) {
5496  // See if there is an advantage from block transfer.
5497  int scale = exact_log2(type2aelembytes(basic_elem_type));
5498  if (scale >= LogBytesPerLong)
5499    return false;               // it is already a block transfer
5500
5501  // Look at the alignment of the starting offsets.
5502  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5503
5504  intptr_t src_off_con  = (intptr_t) find_int_con(src_offset, -1);
5505  intptr_t dest_off_con = (intptr_t) find_int_con(dest_offset, -1);
5506  if (src_off_con < 0 || dest_off_con < 0)
5507    // At present, we can only understand constants.
5508    return false;
5509
5510  intptr_t src_off  = abase + (src_off_con  << scale);
5511  intptr_t dest_off = abase + (dest_off_con << scale);
5512
5513  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
5514    // Non-aligned; too bad.
5515    // One more chance:  Pick off an initial 32-bit word.
5516    // This is a common case, since abase can be odd mod 8.
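    // Illustrative case: suppose abase is 4 mod 8 and both offsets are zero,
    // so src_off == dest_off == abase.  Then (src_off | dest_off) & 7 == 4 ==
    // BytesPerInt and (src_off ^ dest_off) & 7 == 0; copying one jint advances
    // both pointers to an 8-byte boundary, and the rest proceeds in jlong steps.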
5517    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
5518        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
5519      Node* sptr = basic_plus_adr(src,  src_off);
5520      Node* dptr = basic_plus_adr(dest, dest_off);
5521      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
5522      store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
5523      src_off += BytesPerInt;
5524      dest_off += BytesPerInt;
5525    } else {
5526      return false;
5527    }
5528  }
5529  assert(src_off % BytesPerLong == 0, "");
5530  assert(dest_off % BytesPerLong == 0, "");
5531
5532  // Do this copy by giant steps.
5533  Node* sptr  = basic_plus_adr(src,  src_off);
5534  Node* dptr  = basic_plus_adr(dest, dest_off);
5535  Node* countx = dest_size;
5536  countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off)));
5537  countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong)));
5538
5539  bool disjoint_bases = true;   // since alloc != NULL
5540  generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
5541                               sptr, NULL, dptr, NULL, countx, dest_uninitialized);
5542
5543  return true;
5544}
5545
5546
5547// Helper function; generates code for the slow case.
5548// We make a call to a runtime method which emulates the native method,
5549// but without the native wrapper overhead.
5550void
5551LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
5552                                        Node* src,  Node* src_offset,
5553                                        Node* dest, Node* dest_offset,
5554                                        Node* copy_length, bool dest_uninitialized) {
5555  assert(!dest_uninitialized, "Invariant");
5556  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
5557                                 OptoRuntime::slow_arraycopy_Type(),
5558                                 OptoRuntime::slow_arraycopy_Java(),
5559                                 "slow_arraycopy", adr_type,
5560                                 src, src_offset, dest, dest_offset,
5561                                 copy_length);
5562
5563  // Handle exceptions thrown by this fellow:
5564  make_slow_call_ex(call, env()->Throwable_klass(), false);
5565}
5566
5567// Helper function; generates code for cases requiring runtime checks.
5568Node*
5569LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
5570                                             Node* dest_elem_klass,
5571                                             Node* src,  Node* src_offset,
5572                                             Node* dest, Node* dest_offset,
5573                                             Node* copy_length, bool dest_uninitialized) {
5574  if (stopped())  return NULL;
5575
5576  address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized);
5577  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5578    return NULL;
5579  }
5580
5581  // Pick out the parameters required to perform a store-check
5582  // for the target array.  This is an optimistic check.  It will
5583  // look in each non-null element's class, at the desired klass's
5584  // super_check_offset, for the desired klass.
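  // Conceptually, the stub's per-oop check is (illustrative pseudocode only):
  //   for each non-null element s copied from src:
  //     if (*(Klass**)((address)s->klass() + check_offset) != check_value)
  //       stop, returning ~(number of elements already copied)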
5585  int sco_offset = in_bytes(Klass::super_check_offset_offset());
5586  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
5587  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
5588  Node* check_offset = ConvI2X(_gvn.transform(n3));
5589  Node* check_value  = dest_elem_klass;
5590
5591  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
5592  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
5593
5594  // (We know the arrays are never conjoint, because their types differ.)
5595  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5596                                 OptoRuntime::checkcast_arraycopy_Type(),
5597                                 copyfunc_addr, "checkcast_arraycopy", adr_type,
5598                                 // five arguments, of which two are
5599                                 // intptr_t (jlong in LP64)
5600                                 src_start, dest_start,
5601                                 copy_length XTOP,
5602                                 check_offset XTOP,
5603                                 check_value);
5604
5605  return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5606}
5607
5608
5609// Helper function; generates code for cases requiring runtime checks.
5610Node*
5611LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
5612                                           Node* src,  Node* src_offset,
5613                                           Node* dest, Node* dest_offset,
5614                                           Node* copy_length, bool dest_uninitialized) {
5615  assert(!dest_uninitialized, "Invariant");
5616  if (stopped())  return NULL;
5617  address copyfunc_addr = StubRoutines::generic_arraycopy();
5618  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5619    return NULL;
5620  }
5621
5622  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5623                    OptoRuntime::generic_arraycopy_Type(),
5624                    copyfunc_addr, "generic_arraycopy", adr_type,
5625                    src, src_offset, dest, dest_offset, copy_length);
5626
5627  return _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5628}
5629
5630// Helper function; generates the fast out-of-line call to an arraycopy stub.
5631void
5632LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
5633                                             BasicType basic_elem_type,
5634                                             bool disjoint_bases,
5635                                             Node* src,  Node* src_offset,
5636                                             Node* dest, Node* dest_offset,
5637                                             Node* copy_length, bool dest_uninitialized) {
5638  if (stopped())  return;               // nothing to do
5639
5640  Node* src_start  = src;
5641  Node* dest_start = dest;
5642  if (src_offset != NULL || dest_offset != NULL) {
5643    assert(src_offset != NULL && dest_offset != NULL, "");
5644    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
5645    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
5646  }
5647
5648  // Figure out which arraycopy runtime method to call.
5649  const char* copyfunc_name = "arraycopy";
5650  address     copyfunc_addr =
5651      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
5652                          disjoint_bases, copyfunc_name, dest_uninitialized);
5653
5654  // Call it.  Note that the count_ix value is not scaled to a byte-size.
5655  make_runtime_call(RC_LEAF|RC_NO_FP,
5656                    OptoRuntime::fast_arraycopy_Type(),
5657                    copyfunc_addr, copyfunc_name, adr_type,
5658                    src_start, dest_start, copy_length XTOP);
5659}
5660
5661//-------------inline_encodeISOArray-----------------------------------
5662// encode char[] to byte[] in ISO_8859_1
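// The intrinsic matches the Java fallback loop, roughly (illustrative sketch;
// srcOff/dstOff are placeholder names):
//   int i = 0;
//   for (; i < length; i++) {
//     char c = src[srcOff + i];
//     if (c > '\u00FF') break;      // not encodable in ISO-8859-1
//     dst[dstOff + i] = (byte) c;
//   }
//   return i;                       // number of characters encoded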
5663bool LibraryCallKit::inline_encodeISOArray() {
5664  assert(callee()->signature()->size() == 5, "encodeISOArray has 5 parameters");
5665  // no receiver since it is a static method
5666  Node *src         = argument(0);
5667  Node *src_offset  = argument(1);
5668  Node *dst         = argument(2);
5669  Node *dst_offset  = argument(3);
5670  Node *length      = argument(4);
5671
5672  const Type* src_type = src->Value(&_gvn);
5673  const Type* dst_type = dst->Value(&_gvn);
5674  const TypeAryPtr* top_src = src_type->isa_aryptr();
5675  const TypeAryPtr* top_dest = dst_type->isa_aryptr();
5676  if (top_src  == NULL || top_src->klass()  == NULL ||
5677      top_dest == NULL || top_dest->klass() == NULL) {
5678    // failed array check
5679    return false;
5680  }
5681
5682  // Figure out the size and type of the elements we will be copying.
5683  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5684  BasicType dst_elem = dst_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5685  if (src_elem != T_CHAR || dst_elem != T_BYTE) {
5686    return false;
5687  }
5688  Node* src_start = array_element_address(src, src_offset, src_elem);
5689  Node* dst_start = array_element_address(dst, dst_offset, dst_elem);
5690  // 'src_start' points to src array + scaled offset
5691  // 'dst_start' points to dst array + scaled offset
5692
5693  const TypeAryPtr* mtype = TypeAryPtr::BYTES;
5694  Node* enc = new (C) EncodeISOArrayNode(control(), memory(mtype), src_start, dst_start, length);
5695  enc = _gvn.transform(enc);
5696  Node* res_mem = _gvn.transform(new (C) SCMemProjNode(enc));
5697  set_memory(res_mem, mtype);
5698  set_result(enc);
5699  return true;
5700}
5701
5702/**
5703 * Calculate CRC32 for byte.
5704 * int java.util.zip.CRC32.update(int crc, int b)
5705 */
5706bool LibraryCallKit::inline_updateCRC32() {
5707  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5708  assert(callee()->signature()->size() == 2, "update has 2 parameters");
5709  // no receiver since it is a static method
5710  Node* crc  = argument(0); // type: int
5711  Node* b    = argument(1); // type: int
5712
5713  /*
5714   *    int c = ~ crc;
5715   *    b = timesXtoThe32[(b ^ c) & 0xFF];
5716   *    b = b ^ (c >>> 8);
5717   *    crc = ~b;
5718   */
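  /*
   * Equivalently (illustrative rewrite of the pseudo-code above):
   *
   *    int c = ~crc;
   *    crc = ~(timesXtoThe32[(c ^ b) & 0xFF] ^ (c >>> 8));
   *
   * The LShiftINode by 2 below scales the table index to a byte offset,
   * since each entry of the 256-entry table is a 4-byte jint.
   */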
5719
5720  Node* M1 = intcon(-1);
5721  crc = _gvn.transform(new (C) XorINode(crc, M1));
5722  Node* result = _gvn.transform(new (C) XorINode(crc, b));
5723  result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));
5724
5725  Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
5726  Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
5727  Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
5728  result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
5729
5730  crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
5731  result = _gvn.transform(new (C) XorINode(crc, result));
5732  result = _gvn.transform(new (C) XorINode(result, M1));
5733  set_result(result);
5734  return true;
5735}
5736
5737/**
5738 * Calculate CRC32 for byte[] array.
5739 * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len)
5740 */
5741bool LibraryCallKit::inline_updateBytesCRC32() {
5742  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5743  assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters");
5744  // no receiver since it is a static method
5745  Node* crc     = argument(0); // type: int
5746  Node* src     = argument(1); // type: oop
5747  Node* offset  = argument(2); // type: int
5748  Node* length  = argument(3); // type: int
5749
5750  const Type* src_type = src->Value(&_gvn);
5751  const TypeAryPtr* top_src = src_type->isa_aryptr();
5752  if (top_src  == NULL || top_src->klass()  == NULL) {
5753    // failed array check
5754    return false;
5755  }
5756
5757  // Figure out the size and type of the elements we will be copying.
5758  BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type();
5759  if (src_elem != T_BYTE) {
5760    return false;
5761  }
5762
5763  // 'src_start' points to src array + scaled offset
5764  Node* src_start = array_element_address(src, offset, src_elem);
5765
5766  // We assume that the range check has been done by the caller.
5767  // TODO: generate a range check (offset + length <= src.length) in the debug VM.
5768
5769  // Call the stub.
5770  address stubAddr = StubRoutines::updateBytesCRC32();
5771  const char *stubName = "updateBytesCRC32";
5772
5773  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5774                                 stubAddr, stubName, TypePtr::BOTTOM,
5775                                 crc, src_start, length);
5776  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5777  set_result(result);
5778  return true;
5779}
5780
5781/**
5782 * Calculate CRC32 for ByteBuffer.
5783 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
5784 */
5785bool LibraryCallKit::inline_updateByteBufferCRC32() {
5786  assert(UseCRC32Intrinsics, "need AVX and CLMUL instruction support");
5787  assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters, one of which is a long");
5788  // no receiver since it is a static method
5789  Node* crc     = argument(0); // type: int
5790  Node* src     = argument(1); // type: long
5791  Node* offset  = argument(3); // type: int
5792  Node* length  = argument(4); // type: int
5793
5794  src = ConvL2X(src);  // adjust Java long to machine word
5795  Node* base = _gvn.transform(new (C) CastX2PNode(src));
5796  offset = ConvI2X(offset);
5797
5798  // 'src_start' points to src array + scaled offset
5799  Node* src_start = basic_plus_adr(top(), base, offset);
5800
5801  // Call the stub.
5802  address stubAddr = StubRoutines::updateBytesCRC32();
5803  const char *stubName = "updateBytesCRC32";
5804
5805  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(),
5806                                 stubAddr, stubName, TypePtr::BOTTOM,
5807                                 crc, src_start, length);
5808  Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
5809  set_result(result);
5810  return true;
5811}
5812
5813//----------------------------inline_reference_get----------------------------
5814// public T java.lang.ref.Reference.get();
5815bool LibraryCallKit::inline_reference_get() {
5816  const int referent_offset = java_lang_ref_Reference::referent_offset;
5817  guarantee(referent_offset > 0, "should have already been set");
5818
5819  // Get the argument:
5820  Node* reference_obj = null_check_receiver();
5821  if (stopped()) return true;
5822
5823  Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
5824
5825  ciInstanceKlass* klass = env()->Object_klass();
5826  const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
5827
5828  Node* no_ctrl = NULL;
5829  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
5830
5831  // Use the pre-barrier to record the value in the referent field
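  // (With a SATB collector such as G1 the referent might otherwise be reachable
  // only through this Reference object; recording the loaded value as pre_val
  // keeps concurrent marking from losing it.)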
5832  pre_barrier(false /* do_load */,
5833              control(),
5834              NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
5835              result /* pre_val */,
5836              T_OBJECT);
5837
5838  // Add memory barrier to prevent commoning reads from this field
5839  // across safepoint since GC can change its value.
5840  insert_mem_bar(Op_MemBarCPUOrder);
5841
5842  set_result(result);
5843  return true;
5844}
5845
5846
5847Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString,
5848                                              bool is_exact=true, bool is_static=false) {
5849
5850  const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr();
5851  assert(tinst != NULL, "obj is null");
5852  assert(tinst->klass()->is_loaded(), "obj is not loaded");
5853  assert(!is_exact || tinst->klass_is_exact(), "klass not exact");
5854
5855  ciField* field = tinst->klass()->as_instance_klass()->get_field_by_name(ciSymbol::make(fieldName),
5856                                                                          ciSymbol::make(fieldTypeString),
5857                                                                          is_static);
5858  if (field == NULL)  return (Node *) NULL;
5860
5861  // The next code is copied from Parse::do_get_xxx():
5862
5863  // Compute address and memory type.
5864  int offset  = field->offset_in_bytes();
5865  bool is_vol = field->is_volatile();
5866  ciType* field_klass = field->type();
5867  assert(field_klass->is_loaded(), "should be loaded");
5868  const TypePtr* adr_type = C->alias_type(field)->adr_type();
5869  Node *adr = basic_plus_adr(fromObj, fromObj, offset);
5870  BasicType bt = field->layout_type();
5871
5872  // Build the resultant type of the load
5873  const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
5874
5875  // Build the load.
5876  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
5877  return loadedField;
5878}
5879
5880
5881//------------------------------inline_aescrypt_Block-----------------------
5882bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) {
5883  address stubAddr = NULL;
5884  const char *stubName = NULL;
5885  assert(UseAES, "need AES instruction support");
5886
5887  switch(id) {
5888  case vmIntrinsics::_aescrypt_encryptBlock:
5889    stubAddr = StubRoutines::aescrypt_encryptBlock();
5890    stubName = "aescrypt_encryptBlock";
5891    break;
5892  case vmIntrinsics::_aescrypt_decryptBlock:
5893    stubAddr = StubRoutines::aescrypt_decryptBlock();
5894    stubName = "aescrypt_decryptBlock";
5895    break;
5896  }
5897  if (stubAddr == NULL) return false;
5898
5899  Node* aescrypt_object = argument(0);
5900  Node* src             = argument(1);
5901  Node* src_offset      = argument(2);
5902  Node* dest            = argument(3);
5903  Node* dest_offset     = argument(4);
5904
5905  // (1) src and dest are arrays.
5906  const Type* src_type = src->Value(&_gvn);
5907  const Type* dest_type = dest->Value(&_gvn);
5908  const TypeAryPtr* top_src = src_type->isa_aryptr();
5909  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5910  assert (top_src  != NULL && top_src->klass()  != NULL &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5911
5912  // for the quick and dirty code we will skip all the checks.
5913  // we are just trying to get the call to be generated.
5914  Node* src_start  = src;
5915  Node* dest_start = dest;
5916  if (src_offset != NULL || dest_offset != NULL) {
5917    assert(src_offset != NULL && dest_offset != NULL, "");
5918    src_start  = array_element_address(src,  src_offset,  T_BYTE);
5919    dest_start = array_element_address(dest, dest_offset, T_BYTE);
5920  }
5921
5922  // Now we need to get the start of the object's expanded key array.  This requires
5923  // a newer class file that stores the key as little-endian ints; otherwise we revert to Java.
5924  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
5925  if (k_start == NULL) return false;
5926
5927  if (Matcher::pass_original_key_for_aes()) {
5928    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
5929    // compatibility issues between Java key expansion and SPARC crypto instructions
5930    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
5931    if (original_k_start == NULL) return false;
5932
5933    // Call the stub.
5934    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5935                      stubAddr, stubName, TypePtr::BOTTOM,
5936                      src_start, dest_start, k_start, original_k_start);
5937  } else {
5938    // Call the stub.
5939    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
5940                      stubAddr, stubName, TypePtr::BOTTOM,
5941                      src_start, dest_start, k_start);
5942  }
5943
5944  return true;
5945}
5946
5947//------------------------------inline_cipherBlockChaining_AESCrypt-----------------------
5948bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
5949  address stubAddr = NULL;
5950  const char *stubName = NULL;
5951
5952  assert(UseAES, "need AES instruction support");
5953
5954  switch(id) {
5955  case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
5956    stubAddr = StubRoutines::cipherBlockChaining_encryptAESCrypt();
5957    stubName = "cipherBlockChaining_encryptAESCrypt";
5958    break;
5959  case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
5960    stubAddr = StubRoutines::cipherBlockChaining_decryptAESCrypt();
5961    stubName = "cipherBlockChaining_decryptAESCrypt";
5962    break;
5963  }
5964  if (stubAddr == NULL) return false;
5965
5966  Node* cipherBlockChaining_object = argument(0);
5967  Node* src                        = argument(1);
5968  Node* src_offset                 = argument(2);
5969  Node* len                        = argument(3);
5970  Node* dest                       = argument(4);
5971  Node* dest_offset                = argument(5);
5972
5973  // (1) src and dest are arrays.
5974  const Type* src_type = src->Value(&_gvn);
5975  const Type* dest_type = dest->Value(&_gvn);
5976  const TypeAryPtr* top_src = src_type->isa_aryptr();
5977  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
5978  assert (top_src  != NULL && top_src->klass()  != NULL
5979          &&  top_dest != NULL && top_dest->klass() != NULL, "args are strange");
5980
5981  // checks are the responsibility of the caller
5982  Node* src_start  = src;
5983  Node* dest_start = dest;
5984  if (src_offset != NULL || dest_offset != NULL) {
5985    assert(src_offset != NULL && dest_offset != NULL, "");
5986    src_start  = array_element_address(src,  src_offset,  T_BYTE);
5987    dest_start = array_element_address(dest, dest_offset, T_BYTE);
5988  }
5989
5990  // If we are in this code, we "know" the embeddedCipher is an AESCrypt object
5991  // (because of the predicate logic executed earlier), so we can cast it here safely.
5992  // This requires a newer class file that stores the expanded key as little-endian
5993  // ints; otherwise we revert to the Java implementation.
5994
5995  Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
5996  if (embeddedCipherObj == NULL) return false;
5997
5998  // cast it to what we know it will be at runtime
5999  const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
6000  assert(tinst != NULL, "CBC obj is null");
6001  assert(tinst->klass()->is_loaded(), "CBC obj is not loaded");
6002  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6003  if (!klass_AESCrypt->is_loaded()) return false;
6004
6005  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6006  const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
6007  const TypeOopPtr* xtype = aklass->as_instance_type();
6008  Node* aescrypt_object = new(C) CheckCastPPNode(control(), embeddedCipherObj, xtype);
6009  aescrypt_object = _gvn.transform(aescrypt_object);
6010
6011  // we need to get the start of the aescrypt_object's expanded key array
6012  Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
6013  if (k_start == NULL) return false;
6014
6015  // similarly, get the start address of the r vector
6016  Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B", /*is_exact*/ false);
6017  if (objRvec == NULL) return false;
6018  Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
6019
6020  Node* cbcCrypt;
6021  if (Matcher::pass_original_key_for_aes()) {
6022    // on SPARC we need to pass the original key since key expansion needs to happen in intrinsics due to
6023    // compatibility issues between Java key expansion and SPARC crypto instructions
6024    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
6025    if (original_k_start == NULL) return false;
6026
6027    // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
6028    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6029                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6030                                 stubAddr, stubName, TypePtr::BOTTOM,
6031                                 src_start, dest_start, k_start, r_start, len, original_k_start);
6032  } else {
6033    // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
6034    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
6035                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
6036                                 stubAddr, stubName, TypePtr::BOTTOM,
6037                                 src_start, dest_start, k_start, r_start, len);
6038  }
6039
6040  // return cipher length (int)
6041  Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
6042  set_result(retvalue);
6043  return true;
6044}
6045
6046//------------------------------get_key_start_from_aescrypt_object-----------------------
6047Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) {
6048  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I", /*is_exact*/ false);
6049  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6050  if (objAESCryptKey == NULL) return (Node *) NULL;
6051
6052  // now have the array, need to get the start address of the K array
6053  Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT);
6054  return k_start;
6055}
6056
6057//------------------------------get_original_key_start_from_aescrypt_object-----------------------
6058Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
6059  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
6060  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
6061  if (objAESCryptKey == NULL) return (Node *) NULL;
6062
6063  // now have the array, need to get the start address of the lastKey array
6064  Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
6065  return original_k_start;
6066}
6067
6068//----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
6069// Return node representing slow path of predicate check.
6070// the pseudo code we want to emulate with this predicate is:
6071// for encryption:
6072//    if (embeddedCipherObj instanceof AESCrypt) do_intrinsic, else do_javapath
6073// for decryption:
6074//    if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath
6075//    note cipher==plain is more conservative than the original java code but that's OK
6076//
6077Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
6078  // First, check the receiver for NULL, since it is a virtual method.
6079  Node* objCBC = argument(0);
6080  objCBC = null_check(objCBC);
6081
6082  if (stopped()) return NULL; // Always NULL
6083
6084  // Load embeddedCipher field of CipherBlockChaining object.
6085  Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false);
6086
6087  // get AESCrypt klass for instanceOf check
6088  // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this
6089  // compile point; it will have the same class loader as the CipherBlockChaining object.
6090  const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr();
6091  assert(tinst != NULL, "CBCobj is null");
6092  assert(tinst->klass()->is_loaded(), "CBCobj is not loaded");
6093
6094  // we want to do an instanceof comparison against the AESCrypt class
6095  ciKlass* klass_AESCrypt = tinst->klass()->as_instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
6096  if (!klass_AESCrypt->is_loaded()) {
6097    // if AESCrypt is not even loaded, we never take the intrinsic fast path
6098    Node* ctrl = control();
6099    set_control(top()); // no regular fast path
6100    return ctrl;
6101  }
6102  ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
6103
6104  Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
6105  Node* cmp_instof  = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
6106  Node* bool_instof  = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
6107
6108  Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN);
6109
6110  // for encryption, we are done
6111  if (!decrypting)
6112    return instof_false;  // even if it is NULL
6113
6114  // for decryption, we need to add a further check to avoid
6115  // taking the intrinsic path when cipher and plain are the same
6116  // see the original java code for why.
6117  RegionNode* region = new(C) RegionNode(3);
6118  region->init_req(1, instof_false);
6119  Node* src = argument(1);
6120  Node* dest = argument(4);
6121  Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
6122  Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
6123  Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
6124  region->init_req(2, src_dest_conjoint);
6125
6126  record_for_igvn(region);
6127  return _gvn.transform(region);
6128}
6129