library_call.cpp revision 836:4325cdaa78ad
/*
 * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_library_call.cpp.incl"

class LibraryIntrinsic : public InlineCallGenerator {
  // Extend the set of intrinsics known to the runtime:
 private:
  bool             _is_virtual;
  vmIntrinsics::ID _intrinsic_id;

 public:
  LibraryIntrinsic(ciMethod* m, bool is_virtual, vmIntrinsics::ID id)
    : InlineCallGenerator(m),
      _is_virtual(is_virtual),
      _intrinsic_id(id)
  {
  }
  virtual bool is_intrinsic() const { return true; }
  virtual bool is_virtual()   const { return _is_virtual; }
  virtual JVMState* generate(JVMState* jvms);
  vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
};


// Local helper class for LibraryIntrinsic:
class LibraryCallKit : public GraphKit {
 private:
  LibraryIntrinsic* _intrinsic;   // the library intrinsic being called

 public:
  LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
    : GraphKit(caller),
      _intrinsic(intrinsic)
  {
  }

  ciMethod*         caller()    const    { return jvms()->method(); }
  int               bci()       const    { return jvms()->bci(); }
  LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
  vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
  ciMethod*         callee()    const    { return _intrinsic->method(); }
  ciSignature*      signature() const    { return callee()->signature(); }
  int               arg_size()  const    { return callee()->arg_size(); }

  bool try_to_inline();

  // Helper functions to inline natives
  void push_result(RegionNode* region, PhiNode* value);
  Node* generate_guard(Node* test, RegionNode* region, float true_prob);
  Node* generate_slow_guard(Node* test, RegionNode* region);
  Node* generate_fair_guard(Node* test, RegionNode* region);
  Node* generate_negative_guard(Node* index, RegionNode* region,
                                // resulting CastII of index:
                                Node* *pos_index = NULL);
  Node* generate_nonpositive_guard(Node* index, bool never_negative,
                                   // resulting CastII of index:
                                   Node* *pos_index = NULL);
  Node* generate_limit_guard(Node* offset, Node* subseq_length,
                             Node* array_length,
                             RegionNode* region);
  Node* generate_current_thread(Node* &tls_output);
  address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset,
                              bool disjoint_bases, const char* &name);
  Node* load_mirror_from_klass(Node* klass);
  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      int nargs,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     int nargs,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
                                         region, null_path,
                                         offset);
  }
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, false);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, false, true);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, false);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, true, true);
  }
  Node* generate_array_guard_common(Node* kls, RegionNode* region,
                                    bool obj_array, bool not_array);
  Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
  CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
                                     bool is_virtual = false, bool is_static = false);
  CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, false, true);
  }
  CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) {
    return generate_method_call(method_id, true, false);
  }

  bool inline_string_compareTo();
  bool inline_string_indexOf();
  Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
  bool inline_string_equals();
  Node* pop_math_arg();
  bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
  bool inline_math_native(vmIntrinsics::ID id);
  bool inline_trig(vmIntrinsics::ID id);
  bool inline_trans(vmIntrinsics::ID id);
  bool inline_abs(vmIntrinsics::ID id);
  bool inline_sqrt(vmIntrinsics::ID id);
  bool inline_pow(vmIntrinsics::ID id);
  bool inline_exp(vmIntrinsics::ID id);
  bool inline_min_max(vmIntrinsics::ID id);
  Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
  // This returns Type::AnyPtr, RawPtr, or OopPtr.
  int classify_unsafe_addr(Node* &base, Node* &offset);
  Node* make_unsafe_address(Node* base, Node* offset);
  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
  bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
  bool inline_unsafe_allocate();
  bool inline_unsafe_copyMemory();
  bool inline_native_currentThread();
  bool inline_native_time_funcs(bool isNano);
  bool inline_native_isInterrupted();
  bool inline_native_Class_query(vmIntrinsics::ID id);
  bool inline_native_subtype_check();

  bool inline_native_newArray();
  bool inline_native_getLength();
  bool inline_array_copyOf(bool is_copyOfRange);
  bool inline_array_equals();
  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
  bool inline_native_clone(bool is_virtual);
  bool inline_native_Reflection_getCallerClass();
  bool inline_native_AtomicLong_get();
  bool inline_native_AtomicLong_attemptUpdate();
  bool is_method_invoke_or_aux_frame(JVMState* jvms);
  // Helper function for inlining native object hash method
  bool inline_native_hashcode(bool is_virtual, bool is_static);
  bool inline_native_getClass();

  // Helper functions for inlining arraycopy
  bool inline_arraycopy();
  void generate_arraycopy(const TypePtr* adr_type,
                          BasicType basic_elem_type,
                          Node* src,  Node* src_offset,
                          Node* dest, Node* dest_offset,
                          Node* copy_length,
                          bool disjoint_bases = false,
                          bool length_never_negative = false,
                          RegionNode* slow_region = NULL);
  AllocateArrayNode* tightly_coupled_allocation(Node* ptr,
                                                RegionNode* slow_region);
  void generate_clear_array(const TypePtr* adr_type,
                            Node* dest,
                            BasicType basic_elem_type,
                            Node* slice_off,
                            Node* slice_len,
                            Node* slice_end);
  bool generate_block_arraycopy(const TypePtr* adr_type,
                                BasicType basic_elem_type,
                                AllocateNode* alloc,
                                Node* src,  Node* src_offset,
                                Node* dest, Node* dest_offset,
                                Node* dest_size);
  void generate_slow_arraycopy(const TypePtr* adr_type,
                               Node* src,  Node* src_offset,
                               Node* dest, Node* dest_offset,
                               Node* copy_length);
  Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
                                     Node* dest_elem_klass,
                                     Node* src,  Node* src_offset,
                                     Node* dest, Node* dest_offset,
                                     Node* copy_length);
  Node* generate_generic_arraycopy(const TypePtr* adr_type,
                                   Node* src,  Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* copy_length);
  void generate_unchecked_arraycopy(const TypePtr* adr_type,
                                    BasicType basic_elem_type,
                                    bool disjoint_bases,
                                    Node* src,  Node* src_offset,
                                    Node* dest, Node* dest_offset,
                                    Node* copy_length);
  bool inline_unsafe_CAS(BasicType type);
  bool inline_unsafe_ordered_store(BasicType type);
  bool inline_fp_conversions(vmIntrinsics::ID id);
  bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
  bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
  bool inline_bitCount(vmIntrinsics::ID id);
  bool inline_reverseBytes(vmIntrinsics::ID id);
};
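
// Rough flow of an intrinsic through this file: Compile::make_vm_intrinsic (below)
// vets the method and the relevant -XX flags and wraps it in a LibraryIntrinsic;
// LibraryIntrinsic::generate then builds a LibraryCallKit over the caller's
// JVMState and dispatches on the intrinsic ID in try_to_inline, which either
// expands the call into ideal graph nodes right here or returns false, letting
// the compiler fall back to an ordinary call site.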


//---------------------------make_vm_intrinsic----------------------------
CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
  vmIntrinsics::ID id = m->intrinsic_id();
  assert(id != vmIntrinsics::_none, "must be a VM intrinsic");

  if (DisableIntrinsic[0] != '\0'
      && strstr(DisableIntrinsic, vmIntrinsics::name_at(id)) != NULL) {
    // disabled by a user request on the command line:
    // example: -XX:DisableIntrinsic=_hashCode,_getClass
    return NULL;
  }

  if (!m->is_loaded()) {
    // do not attempt to inline unloaded methods
    return NULL;
  }

  // Only a few intrinsics implement a virtual dispatch.
  // They are expensive calls which are also frequently overridden.
  if (is_virtual) {
    switch (id) {
    case vmIntrinsics::_hashCode:
    case vmIntrinsics::_clone:
      // OK, Object.hashCode and Object.clone intrinsics come in both flavors
      break;
    default:
      return NULL;
    }
  }

  // -XX:-InlineNatives disables nearly all intrinsics:
  if (!InlineNatives) {
    switch (id) {
    case vmIntrinsics::_indexOf:
    case vmIntrinsics::_compareTo:
    case vmIntrinsics::_equals:
    case vmIntrinsics::_equalsC:
      break;  // InlineNatives does not control String.compareTo
    default:
      return NULL;
    }
  }

  switch (id) {
  case vmIntrinsics::_compareTo:
    if (!SpecialStringCompareTo)  return NULL;
    break;
  case vmIntrinsics::_indexOf:
    if (!SpecialStringIndexOf)  return NULL;
    break;
  case vmIntrinsics::_equals:
    if (!SpecialStringEquals)  return NULL;
    break;
  case vmIntrinsics::_equalsC:
    if (!SpecialArraysEquals)  return NULL;
    break;
  case vmIntrinsics::_arraycopy:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_copyMemory:
    if (StubRoutines::unsafe_arraycopy() == NULL)  return NULL;
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_hashCode:
    if (!InlineObjectHash)  return NULL;
    break;
  case vmIntrinsics::_clone:
  case vmIntrinsics::_copyOf:
  case vmIntrinsics::_copyOfRange:
    if (!InlineObjectCopy)  return NULL;
    // These also use the arraycopy intrinsic mechanism:
    if (!InlineArrayCopy)  return NULL;
    break;
  case vmIntrinsics::_checkIndex:
    // We do not intrinsify this.  The optimizer does fine with it.
    return NULL;

  case vmIntrinsics::_get_AtomicLong:
  case vmIntrinsics::_attemptUpdate:
    if (!InlineAtomicLong)  return NULL;
    break;

  case vmIntrinsics::_Object_init:
  case vmIntrinsics::_invoke:
    // We do not intrinsify these; they are marked for other purposes.
    return NULL;

  case vmIntrinsics::_getCallerClass:
    if (!UseNewReflection)  return NULL;
    if (!InlineReflectionGetCallerClass)  return NULL;
    if (!JDK_Version::is_gte_jdk14x_version())  return NULL;
    break;

  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
    if (!UsePopCountInstruction)  return NULL;
    break;

  default:
    break;
  }

  // -XX:-InlineClassNatives disables natives from the Class class.
  // The flag applies to all reflective calls, notably Array.newArray
  // (visible to Java programmers as Array.newInstance).
  if (m->holder()->name() == ciSymbol::java_lang_Class() ||
      m->holder()->name() == ciSymbol::java_lang_reflect_Array()) {
    if (!InlineClassNatives)  return NULL;
  }

  // -XX:-InlineThreadNatives disables natives from the Thread class.
  if (m->holder()->name() == ciSymbol::java_lang_Thread()) {
    if (!InlineThreadNatives)  return NULL;
  }

  // -XX:-InlineMathNatives disables natives from the Math, Float and Double classes.
  if (m->holder()->name() == ciSymbol::java_lang_Math() ||
      m->holder()->name() == ciSymbol::java_lang_Float() ||
      m->holder()->name() == ciSymbol::java_lang_Double()) {
    if (!InlineMathNatives)  return NULL;
  }

  // -XX:-InlineUnsafeOps disables natives from the Unsafe class.
  if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) {
    if (!InlineUnsafeOps)  return NULL;
  }

  return new LibraryIntrinsic(m, is_virtual, (vmIntrinsics::ID) id);
}

//----------------------register_library_intrinsics-----------------------
// Initialize this file's data structures, for each Compile instance.
void Compile::register_library_intrinsics() {
  // Nothing to do here.
}

JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
  LibraryCallKit kit(jvms, this);
  Compile* C = kit.C;
  int nodes = C->unique();
#ifndef PRODUCT
  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
    char buf[1000];
    const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
    tty->print_cr("Intrinsic %s", str);
  }
#endif
  if (kit.try_to_inline()) {
    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
      tty->print("Inlining intrinsic %s%s at bci:%d in",
                 vmIntrinsics::name_at(intrinsic_id()),
                 (is_virtual() ? " (virtual)" : ""), kit.bci());
      kit.caller()->print_short_name(tty);
      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
    }
    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
    if (C->log()) {
      C->log()->elem("intrinsic id='%s'%s nodes='%d'",
                     vmIntrinsics::name_at(intrinsic_id()),
                     (is_virtual() ? " virtual='1'" : ""),
                     C->unique() - nodes);
    }
    return kit.transfer_exceptions_into_jvms();
  }

  if (PrintIntrinsics) {
    switch (intrinsic_id()) {
    case vmIntrinsics::_invoke:
    case vmIntrinsics::_Object_init:
      // We do not expect to inline these, so do not produce any noise about them.
      break;
    default:
      tty->print("Did not inline intrinsic %s%s at bci:%d in",
                 vmIntrinsics::name_at(intrinsic_id()),
                 (is_virtual() ? " (virtual)" : ""), kit.bci());
      kit.caller()->print_short_name(tty);
      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
    }
  }
  C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  return NULL;
}

bool LibraryCallKit::try_to_inline() {
  // Handle symbolic names for otherwise undistinguished boolean switches:
  const bool is_store       = true;
  const bool is_native_ptr  = true;
  const bool is_static      = true;

  switch (intrinsic_id()) {
  case vmIntrinsics::_hashCode:
    return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
  case vmIntrinsics::_identityHashCode:
    return inline_native_hashcode(/*!virtual*/ false, is_static);
  case vmIntrinsics::_getClass:
    return inline_native_getClass();

  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_datan2:
  case vmIntrinsics::_dsqrt:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dlog10:
  case vmIntrinsics::_dpow:
    return inline_math_native(intrinsic_id());

  case vmIntrinsics::_min:
  case vmIntrinsics::_max:
    return inline_min_max(intrinsic_id());

  case vmIntrinsics::_arraycopy:
    return inline_arraycopy();

  case vmIntrinsics::_compareTo:
    return inline_string_compareTo();
  case vmIntrinsics::_indexOf:
    return inline_string_indexOf();
  case vmIntrinsics::_equals:
    return inline_string_equals();

  case vmIntrinsics::_getObject:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false);
  case vmIntrinsics::_getBoolean:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false);
  case vmIntrinsics::_getByte:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false);
  case vmIntrinsics::_getShort:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false);
  case vmIntrinsics::_getChar:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false);
  case vmIntrinsics::_getInt:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false);
  case vmIntrinsics::_getLong:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false);
  case vmIntrinsics::_getFloat:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false);
  case vmIntrinsics::_getDouble:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false);

  case vmIntrinsics::_putObject:
    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false);
  case vmIntrinsics::_putBoolean:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false);
  case vmIntrinsics::_putByte:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false);
  case vmIntrinsics::_putShort:
    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false);
  case vmIntrinsics::_putChar:
    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false);
  case vmIntrinsics::_putInt:
    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false);
  case vmIntrinsics::_putLong:
    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false);
  case vmIntrinsics::_putFloat:
    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false);
  case vmIntrinsics::_putDouble:
    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false);

  case vmIntrinsics::_getByte_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false);
  case vmIntrinsics::_getShort_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false);
  case vmIntrinsics::_getChar_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false);
  case vmIntrinsics::_getInt_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false);
  case vmIntrinsics::_getLong_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false);
  case vmIntrinsics::_getFloat_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false);
  case vmIntrinsics::_getDouble_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false);
  case vmIntrinsics::_getAddress_raw:
    return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false);

  case vmIntrinsics::_putByte_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false);
  case vmIntrinsics::_putShort_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false);
  case vmIntrinsics::_putChar_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false);
  case vmIntrinsics::_putInt_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_INT, false);
  case vmIntrinsics::_putLong_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false);
  case vmIntrinsics::_putFloat_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false);
  case vmIntrinsics::_putDouble_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false);
  case vmIntrinsics::_putAddress_raw:
    return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false);

  case vmIntrinsics::_getObjectVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true);
  case vmIntrinsics::_getBooleanVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true);
  case vmIntrinsics::_getByteVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true);
  case vmIntrinsics::_getShortVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true);
  case vmIntrinsics::_getCharVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true);
  case vmIntrinsics::_getIntVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true);
  case vmIntrinsics::_getLongVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true);
  case vmIntrinsics::_getFloatVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true);
  case vmIntrinsics::_getDoubleVolatile:
    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true);

  case vmIntrinsics::_putObjectVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true);
  case vmIntrinsics::_putBooleanVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true);
  case vmIntrinsics::_putByteVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true);
  case vmIntrinsics::_putShortVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true);
  case vmIntrinsics::_putCharVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
  case vmIntrinsics::_putIntVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
  case vmIntrinsics::_putLongVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
  case vmIntrinsics::_putFloatVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
  case vmIntrinsics::_putDoubleVolatile:
    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);

  case vmIntrinsics::_prefetchRead:
    return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
  case vmIntrinsics::_prefetchWrite:
    return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
  case vmIntrinsics::_prefetchReadStatic:
    return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
  case vmIntrinsics::_prefetchWriteStatic:
    return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);

  case vmIntrinsics::_compareAndSwapObject:
    return inline_unsafe_CAS(T_OBJECT);
  case vmIntrinsics::_compareAndSwapInt:
    return inline_unsafe_CAS(T_INT);
  case vmIntrinsics::_compareAndSwapLong:
    return inline_unsafe_CAS(T_LONG);

  case vmIntrinsics::_putOrderedObject:
    return inline_unsafe_ordered_store(T_OBJECT);
  case vmIntrinsics::_putOrderedInt:
    return inline_unsafe_ordered_store(T_INT);
  case vmIntrinsics::_putOrderedLong:
    return inline_unsafe_ordered_store(T_LONG);

  case vmIntrinsics::_currentThread:
    return inline_native_currentThread();
  case vmIntrinsics::_isInterrupted:
    return inline_native_isInterrupted();

  case vmIntrinsics::_currentTimeMillis:
    return inline_native_time_funcs(false);
  case vmIntrinsics::_nanoTime:
    return inline_native_time_funcs(true);
  case vmIntrinsics::_allocateInstance:
    return inline_unsafe_allocate();
  case vmIntrinsics::_copyMemory:
    return inline_unsafe_copyMemory();
  case vmIntrinsics::_newArray:
    return inline_native_newArray();
  case vmIntrinsics::_getLength:
    return inline_native_getLength();
  case vmIntrinsics::_copyOf:
    return inline_array_copyOf(false);
  case vmIntrinsics::_copyOfRange:
    return inline_array_copyOf(true);
  case vmIntrinsics::_equalsC:
    return inline_array_equals();
  case vmIntrinsics::_clone:
    return inline_native_clone(intrinsic()->is_virtual());

  case vmIntrinsics::_isAssignableFrom:
    return inline_native_subtype_check();

  case vmIntrinsics::_isInstance:
  case vmIntrinsics::_getModifiers:
  case vmIntrinsics::_isInterface:
  case vmIntrinsics::_isArray:
  case vmIntrinsics::_isPrimitive:
  case vmIntrinsics::_getSuperclass:
  case vmIntrinsics::_getComponentType:
  case vmIntrinsics::_getClassAccessFlags:
    return inline_native_Class_query(intrinsic_id());

  case vmIntrinsics::_floatToRawIntBits:
  case vmIntrinsics::_floatToIntBits:
  case vmIntrinsics::_intBitsToFloat:
  case vmIntrinsics::_doubleToRawLongBits:
  case vmIntrinsics::_doubleToLongBits:
  case vmIntrinsics::_longBitsToDouble:
    return inline_fp_conversions(intrinsic_id());

  case vmIntrinsics::_numberOfLeadingZeros_i:
  case vmIntrinsics::_numberOfLeadingZeros_l:
    return inline_numberOfLeadingZeros(intrinsic_id());

  case vmIntrinsics::_numberOfTrailingZeros_i:
  case vmIntrinsics::_numberOfTrailingZeros_l:
    return inline_numberOfTrailingZeros(intrinsic_id());

  case vmIntrinsics::_bitCount_i:
  case vmIntrinsics::_bitCount_l:
    return inline_bitCount(intrinsic_id());

  case vmIntrinsics::_reverseBytes_i:
  case vmIntrinsics::_reverseBytes_l:
    return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());

  case vmIntrinsics::_get_AtomicLong:
    return inline_native_AtomicLong_get();
  case vmIntrinsics::_attemptUpdate:
    return inline_native_AtomicLong_attemptUpdate();

  case vmIntrinsics::_getCallerClass:
    return inline_native_Reflection_getCallerClass();

  default:
    // If you get here, it may be that someone has added a new intrinsic
    // to the list in vmSymbols.hpp without implementing it here.
#ifndef PRODUCT
    if ((PrintMiscellaneous && (Verbose || WizardMode)) || PrintOpto) {
      tty->print_cr("*** Warning: Unimplemented intrinsic %s(%d)",
                    vmIntrinsics::name_at(intrinsic_id()), intrinsic_id());
    }
#endif
    return false;
  }
}

//------------------------------push_result------------------------------
// Helper function for finishing intrinsics.
void LibraryCallKit::push_result(RegionNode* region, PhiNode* value) {
  record_for_igvn(region);
  set_control(_gvn.transform(region));
  BasicType value_type = value->type()->basic_type();
  push_node(value_type, _gvn.transform(value));
}

//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path.  If the test fails
// then a fast path can be taken.  (We generally hope it fails.)
// In all cases, GraphKit::control() is updated to the fast path.
// The returned value represents the control for the slow path.
// The return value is never 'top'; it is either a valid control
// or NULL if it is obvious that the slow path can never be taken.
// Also, if region and the slow control are not NULL, the slow edge
// is appended to the region.
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) {
  if (stopped()) {
    // Already short circuited.
    return NULL;
  }

  // Build an if node and its projections.
  // If test is true we take the slow path, which we assume is uncommon.
  if (_gvn.type(test) == TypeInt::ZERO) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN);

  Node* if_slow = _gvn.transform( new (C, 1) IfTrueNode(iff) );
  if (if_slow == top()) {
    // The slow branch is never taken.  No need to build this guard.
    return NULL;
  }

  if (region != NULL)
    region->add_req(if_slow);

  Node* if_fast = _gvn.transform( new (C, 1) IfFalseNode(iff) );
  set_control(if_fast);

  return if_slow;
}
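
// A minimal usage sketch of the guard helpers (hypothetical caller, not code
// from this file): slow edges accumulate on one region while control() stays
// on the fast path throughout.
//
//   RegionNode* slow_region = new (C, 1) RegionNode(1);
//   Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::lt) );
//   generate_slow_guard(bol, slow_region);  // control() now follows the fast path
//   // ... emit fast-path nodes, then merge slow_region's edges afterwards ...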

inline Node* LibraryCallKit::generate_slow_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_UNLIKELY_MAG(3));
}
inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) {
  return generate_guard(test, region, PROB_FAIR);
}

inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region,
                                                     Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_lt = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
  Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
  Node* is_neg = generate_guard(bol_lt, region, PROB_MIN);
  if (is_neg != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_neg;
}

inline Node* LibraryCallKit::generate_nonpositive_guard(Node* index, bool never_negative,
                                                        Node* *pos_index) {
  if (stopped())
    return NULL;                // already stopped
  if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint]
    return NULL;                // index is already adequately typed
  Node* cmp_le = _gvn.transform( new (C, 3) CmpINode(index, intcon(0)) );
  BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le);
  Node* bol_le = _gvn.transform( new (C, 2) BoolNode(cmp_le, le_or_eq) );
  Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN);
  if (is_notp != NULL && pos_index != NULL) {
    // Emulate effect of Parse::adjust_map_after_if.
    Node* ccast = new (C, 2) CastIINode(index, TypeInt::POS1);
    ccast->set_req(0, control());
    (*pos_index) = _gvn.transform(ccast);
  }
  return is_notp;
}

// Make sure that 'position' is a valid limit index, in [0..length].
// There are two equivalent plans for checking this:
//   A. (offset + copyLength)  unsigned<=  arrayLength
//   B. offset  <=  (arrayLength - copyLength)
// We require that all of the values above, except for the sum and
// difference, are already known to be non-negative.
// Plan A is robust in the face of overflow, if offset and copyLength
// are both hugely positive.
//
// Plan B is less direct and intuitive, but it does not overflow at
// all, since the difference of two non-negatives is always
// representable.  Whenever Java methods must perform the equivalent
// check they generally use Plan B instead of Plan A.
// For the moment we use Plan A.
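// A concrete instance of the overflow hazard (hypothetical 32-bit values): with
// offset = 0x7fffff00 and copyLength = 0x200, the signed sum wraps around to a
// negative number, but reinterpreted as unsigned it is 0x80000100, larger than
// any legal arrayLength (at most 2^31-1), so Plan A's unsigned compare still
// rejects the out-of-bounds request.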
inline Node* LibraryCallKit::generate_limit_guard(Node* offset,
                                                  Node* subseq_length,
                                                  Node* array_length,
                                                  RegionNode* region) {
  if (stopped())
    return NULL;                // already stopped
  bool zero_offset = _gvn.type(offset) == TypeInt::ZERO;
  if (zero_offset && _gvn.eqv_uncast(subseq_length, array_length))
    return NULL;                // common case of whole-array copy
  Node* last = subseq_length;
  if (!zero_offset)             // last += offset
    last = _gvn.transform( new (C, 3) AddINode(last, offset));
  Node* cmp_lt = _gvn.transform( new (C, 3) CmpUNode(array_length, last) );
  Node* bol_lt = _gvn.transform( new (C, 2) BoolNode(cmp_lt, BoolTest::lt) );
  Node* is_over = generate_guard(bol_lt, region, PROB_MIN);
  return is_over;
}


//--------------------------generate_current_thread--------------------
Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
  ciKlass*    thread_klass = env()->Thread_klass();
  const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
  Node* thread = _gvn.transform(new (C, 1) ThreadLocalNode());
  Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
  tls_output = thread;
  return threadObj;
}


//------------------------------inline_string_compareTo------------------------
bool LibraryCallKit::inline_string_compareTo() {

  if (!Matcher::has_match_rule(Op_StrComp)) return false;

  const int value_offset = java_lang_String::value_offset_in_bytes();
  const int count_offset = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  _sp += 2;
  Node *argument = pop();  // pop non-receiver first:  it was pushed second
  Node *receiver = pop();

  // Null check on self without removing any arguments.  The argument
  // null check technically happens in the wrong place, which can lead to
  // invalid stack traces when string compare is inlined into a method
  // which handles NullPointerExceptions.
  _sp += 2;
  receiver = do_null_check(receiver, T_OBJECT);
  argument = do_null_check(argument, T_OBJECT);
  _sp -= 2;
  if (stopped()) {
    return true;
  }

  ciInstanceKlass* klass = env()->String_klass();
  const TypeInstPtr* string_type =
    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);

  Node* compare =
    _gvn.transform(new (C, 7) StrCompNode(
                        control(),
                        memory(TypeAryPtr::CHARS),
                        memory(string_type->add_offset(value_offset)),
                        memory(string_type->add_offset(count_offset)),
                        memory(string_type->add_offset(offset_offset)),
                        receiver,
                        argument));
  push(compare);
  return true;
}

//------------------------------inline_string_equals------------------------
bool LibraryCallKit::inline_string_equals() {

  if (!Matcher::has_match_rule(Op_StrEquals)) return false;

  const int value_offset = java_lang_String::value_offset_in_bytes();
  const int count_offset = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  _sp += 2;
  Node* argument = pop();  // pop non-receiver first:  it was pushed second
  Node* receiver = pop();

  // Null check on self without removing any arguments.  The argument
  // null check technically happens in the wrong place, which can lead to
  // invalid stack traces when string compare is inlined into a method
  // which handles NullPointerExceptions.
  _sp += 2;
  receiver = do_null_check(receiver, T_OBJECT);
  // Do not null check the argument of String.equals(): the spec allows
  // null to be passed as the argument.
  _sp -= 2;

  if (stopped()) {
    return true;
  }

  // get String klass for instanceOf
  ciInstanceKlass* klass = env()->String_klass();

  // two paths (plus control) merge
  RegionNode* region = new (C, 3) RegionNode(3);
  Node* phi = new (C, 3) PhiNode(region, TypeInt::BOOL);

  Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
  Node* cmp  = _gvn.transform(new (C, 3) CmpINode(inst, intcon(1)));
  Node* bol  = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::eq));

  IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);

  Node* if_true  = _gvn.transform(new (C, 1) IfTrueNode(iff));
  set_control(if_true);

  const TypeInstPtr* string_type =
    TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);

  // instanceOf == true
  Node* equals =
    _gvn.transform(new (C, 7) StrEqualsNode(
                        control(),
                        memory(TypeAryPtr::CHARS),
                        memory(string_type->add_offset(value_offset)),
                        memory(string_type->add_offset(count_offset)),
                        memory(string_type->add_offset(offset_offset)),
                        receiver,
                        argument));

  phi->init_req(1, _gvn.transform(equals));
  region->init_req(1, if_true);

  // instanceOf == false, fallthrough
  Node* if_false = _gvn.transform(new (C, 1) IfFalseNode(iff));
  set_control(if_false);

  phi->init_req(2, _gvn.transform(intcon(0)));
  region->init_req(2, if_false);

  // post merge
  set_control(_gvn.transform(region));
  record_for_igvn(region);

  push(_gvn.transform(phi));

  return true;
}

//------------------------------inline_array_equals----------------------------
bool LibraryCallKit::inline_array_equals() {

  if (!Matcher::has_match_rule(Op_AryEq)) return false;

  _sp += 2;
  Node *argument2 = pop();
  Node *argument1 = pop();

  Node* equals =
    _gvn.transform(new (C, 3) AryEqNode(control(),
                                        argument1,
                                        argument2)
                   );
  push(equals);
  return true;
}

// Java version of String.indexOf(constant string)
// class StringDecl {
//   StringDecl(char[] ca) {
//     offset = 0;
//     count = ca.length;
//     value = ca;
//   }
//   int offset;
//   int count;
//   char[] value;
// }
//
// static int string_indexOf_J(StringDecl string_object, char[] target_object,
//                             int targetOffset, int cache_i, int md2) {
//   int cache = cache_i;
//   int sourceOffset = string_object.offset;
//   int sourceCount = string_object.count;
//   int targetCount = target_object.length;
//
//   int targetCountLess1 = targetCount - 1;
//   int sourceEnd = sourceOffset + sourceCount - targetCountLess1;
//
//   char[] source = string_object.value;
//   char[] target = target_object;
//   int lastChar = target[targetCountLess1];
//
//  outer_loop:
//   for (int i = sourceOffset; i < sourceEnd; ) {
//     int src = source[i + targetCountLess1];
//     if (src == lastChar) {
//       // With random strings and a 4-character alphabet,
//       // reverse matching at this point sets up 0.8% fewer
//       // frames, but (paradoxically) makes 0.3% more probes.
//       // Since those probes are nearer the lastChar probe,
//       // there may be a net D$ win with reverse matching.
//       // But reversing the loop inhibits unrolling of the inner loop
//       // for an unknown reason, and so does running the outer loop from
//       // (sourceOffset - targetCountLess1) to (sourceOffset + sourceCount).
//       for (int j = 0; j < targetCountLess1; j++) {
//         if (target[targetOffset + j] != source[i+j]) {
//           if ((cache & (1 << source[i+j])) == 0) {
//             if (md2 < j+1) {
//               i += j+1;
//               continue outer_loop;
//             }
//           }
//           i += md2;
//           continue outer_loop;
//         }
//       }
//       return i - sourceOffset;
//     }
//     if ((cache & (1 << src)) == 0) {
//       i += targetCountLess1;
//     } // using "i += targetCount;" and an "else i++;" causes a jump to jump.
//     i++;
//   }
//   return -1;
// }
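
// In Boyer-Moore-Horspool style, 'cache' is a 32-bit bad-character bitmask
// (each pattern character sets the bit selected by its low 5 bits) and 'md2'
// is the default skip distance derived from the last occurrence of the final
// pattern character; both are precomputed as compile-time constants in
// inline_string_indexOf() below, since the pattern is a constant string.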

//------------------------------string_indexOf------------------------
Node* LibraryCallKit::string_indexOf(Node* string_object, ciTypeArray* target_array, jint targetOffset_i,
                                     jint cache_i, jint md2_i) {

  Node* no_ctrl  = NULL;
  float likely   = PROB_LIKELY(0.9);
  float unlikely = PROB_UNLIKELY(0.9);

  const int value_offset  = java_lang_String::value_offset_in_bytes();
  const int count_offset  = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  ciInstanceKlass* klass = env()->String_klass();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);
  const TypeAryPtr*  source_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::CHAR,TypeInt::POS), ciTypeArrayKlass::make(T_CHAR), true, 0);

  Node* sourceOffseta = basic_plus_adr(string_object, string_object, offset_offset);
  Node* sourceOffset  = make_load(no_ctrl, sourceOffseta, TypeInt::INT, T_INT, string_type->add_offset(offset_offset));
  Node* sourceCounta  = basic_plus_adr(string_object, string_object, count_offset);
  Node* sourceCount   = make_load(no_ctrl, sourceCounta, TypeInt::INT, T_INT, string_type->add_offset(count_offset));
  Node* sourcea       = basic_plus_adr(string_object, string_object, value_offset);
  Node* source        = make_load(no_ctrl, sourcea, source_type, T_OBJECT, string_type->add_offset(value_offset));

  Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array)) );
  jint target_length = target_array->length();
  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
  const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);

  IdealKit kit(gvn(), control(), merged_memory());
#define __ kit.
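  // Note: each '__ ' below forwards to the IdealKit, which records the control
  // flow and memory state being built up and defers GVN transforms until the
  // drain_delay_transform() call at the end of this function.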
  Node* zero             = __ ConI(0);
  Node* one              = __ ConI(1);
  Node* cache            = __ ConI(cache_i);
  Node* md2              = __ ConI(md2_i);
  Node* lastChar         = __ ConI(target_array->char_at(target_length - 1));
  Node* targetCount      = __ ConI(target_length);
  Node* targetCountLess1 = __ ConI(target_length - 1);
  Node* targetOffset     = __ ConI(targetOffset_i);
  Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);

  IdealVariable rtn(kit), i(kit), j(kit); __ declares_done();
  Node* outer_loop = __ make_label(2 /* goto */);
  Node* return_    = __ make_label(1);

  __ set(rtn,__ ConI(-1));
  __ loop(i, sourceOffset, BoolTest::lt, sourceEnd); {
       Node* i2  = __ AddI(__ value(i), targetCountLess1);
       // pin to prohibit loading of "next iteration" value which may SEGV (rare)
       Node* src = load_array_element(__ ctrl(), source, i2, TypeAryPtr::CHARS);
       __ if_then(src, BoolTest::eq, lastChar, unlikely); {
         __ loop(j, zero, BoolTest::lt, targetCountLess1); {
              Node* tpj = __ AddI(targetOffset, __ value(j));
              Node* targ = load_array_element(no_ctrl, target, tpj, target_type);
              Node* ipj  = __ AddI(__ value(i), __ value(j));
              Node* src2 = load_array_element(no_ctrl, source, ipj, TypeAryPtr::CHARS);
              __ if_then(targ, BoolTest::ne, src2); {
                __ if_then(__ AndI(cache, __ LShiftI(one, src2)), BoolTest::eq, zero); {
                  __ if_then(md2, BoolTest::lt, __ AddI(__ value(j), one)); {
                    __ increment(i, __ AddI(__ value(j), one));
                    __ goto_(outer_loop);
                  } __ end_if(); __ dead(j);
                }__ end_if(); __ dead(j);
                __ increment(i, md2);
                __ goto_(outer_loop);
              }__ end_if();
              __ increment(j, one);
         }__ end_loop(); __ dead(j);
         __ set(rtn, __ SubI(__ value(i), sourceOffset)); __ dead(i);
         __ goto_(return_);
       }__ end_if();
       __ if_then(__ AndI(cache, __ LShiftI(one, src)), BoolTest::eq, zero, likely); {
         __ increment(i, targetCountLess1);
       }__ end_if();
       __ increment(i, one);
       __ bind(outer_loop);
  }__ end_loop(); __ dead(i);
  __ bind(return_);
  __ drain_delay_transform();

  set_control(__ ctrl());
  Node* result = __ value(rtn);
#undef __
  C->set_has_loops(true);
  return result;
}

//------------------------------inline_string_indexOf------------------------
bool LibraryCallKit::inline_string_indexOf() {

  const int value_offset  = java_lang_String::value_offset_in_bytes();
  const int count_offset  = java_lang_String::count_offset_in_bytes();
  const int offset_offset = java_lang_String::offset_offset_in_bytes();

  _sp += 2;
  Node *argument = pop();  // pop non-receiver first:  it was pushed second
  Node *receiver = pop();

  Node* result;
  if (Matcher::has_match_rule(Op_StrIndexOf) &&
      UseSSE42Intrinsics) {
    // Generate SSE4.2 version of indexOf
    // We currently only have match rules that use SSE4.2

    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when string compare is inlined into a method
    // which handles NullPointerExceptions.
    _sp += 2;
    receiver = do_null_check(receiver, T_OBJECT);
    argument = do_null_check(argument, T_OBJECT);
    _sp -= 2;

    if (stopped()) {
      return true;
    }

    ciInstanceKlass* klass = env()->String_klass();
    const TypeInstPtr* string_type =
      TypeInstPtr::make(TypePtr::BotPTR, klass, false, NULL, 0);

    result =
      _gvn.transform(new (C, 7)
                     StrIndexOfNode(control(),
                                    memory(TypeAryPtr::CHARS),
                                    memory(string_type->add_offset(value_offset)),
                                    memory(string_type->add_offset(count_offset)),
                                    memory(string_type->add_offset(offset_offset)),
                                    receiver,
                                    argument));
  } else { // Use LibraryCallKit::string_indexOf
    // Don't intrinsify if the argument isn't a constant string.
    if (!argument->is_Con()) {
      return false;
    }
    const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr();
    if (str_type == NULL) {
      return false;
    }
    ciInstanceKlass* klass = env()->String_klass();
    ciObject* str_const = str_type->const_oop();
    if (str_const == NULL || str_const->klass() != klass) {
      return false;
    }
    ciInstance* str = str_const->as_instance();
    assert(str != NULL, "must be instance");

    ciObject* v = str->field_value_by_offset(value_offset).as_object();
    int       o = str->field_value_by_offset(offset_offset).as_int();
    int       c = str->field_value_by_offset(count_offset).as_int();
    ciTypeArray* pat = v->as_type_array(); // pattern (argument) character array

    // Constant strings have no offset and count == length, which
    // simplifies the resulting code somewhat, so let's optimize for that.
    if (o != 0 || c != pat->length()) {
      return false;
    }

    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when string compare is inlined into a method
    // which handles NullPointerExceptions.
    _sp += 2;
    receiver = do_null_check(receiver, T_OBJECT);
    // No null check on the argument is needed since it's a constant String oop.
    _sp -= 2;
    if (stopped()) {
      return true;
    }

    // An empty string as a pattern always returns 0 (match at beginning of string).
    if (c == 0) {
      push(intcon(0));
      return true;
    }

    // Generate default indexOf
    jchar lastChar = pat->char_at(o + (c - 1));
    int cache = 0;
    int i;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      cache |= (1 << (pat->char_at(o + i) & (sizeof(cache) * BitsPerByte - 1)));
    }

    int md2 = c;
    for (i = 0; i < c - 1; i++) {
      assert(i < pat->length(), "out of range");
      if (pat->char_at(o + i) == lastChar) {
        md2 = (c - 1) - i;
      }
    }
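
    // Worked example (hypothetical pattern "abcab"): c == 5 and lastChar == 'b'.
    // The cache accumulates the bits for 'a', 'b', 'c' (each char masked to its
    // low 5 bits), and the last 'b' among pat[0..3] sits at index 1, so
    // md2 == (5-1) - 1 == 3: an inner-loop mismatch shifts the window by 3
    // unless the bad-character test allows a larger jump.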

    result = string_indexOf(receiver, pat, o, cache, md2);
  }

  push(result);
  return true;
}

//--------------------------pop_math_arg--------------------------------
// Pop a double argument to a math function from the stack
// rounding it if necessary.
Node * LibraryCallKit::pop_math_arg() {
  Node *arg = pop_pair();
  if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 )
    arg = _gvn.transform( new (C, 2) RoundDoubleNode(0, arg) );
  return arg;
}

//------------------------------inline_trig----------------------------------
// Inline sin/cos/tan instructions, if possible.  If rounding is required, do
// argument reduction which will turn into a fast/slow diamond.
bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
  _sp += arg_size();            // restore stack pointer
  Node* arg = pop_math_arg();
  Node* trig = NULL;

  switch (id) {
  case vmIntrinsics::_dsin:
    trig = _gvn.transform((Node*)new (C, 2) SinDNode(arg));
    break;
  case vmIntrinsics::_dcos:
    trig = _gvn.transform((Node*)new (C, 2) CosDNode(arg));
    break;
  case vmIntrinsics::_dtan:
    trig = _gvn.transform((Node*)new (C, 2) TanDNode(arg));
    break;
  default:
    assert(false, "bad intrinsic was passed in");
    return false;
  }

  // Rounding required?  Check for argument reduction!
  if( Matcher::strict_fp_requires_explicit_rounding ) {

    static const double     pi_4 =  0.7853981633974483;
    static const double neg_pi_4 = -0.7853981633974483;
    // pi/2 in 80-bit extended precision
    // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00};
    // -pi/2 in 80-bit extended precision
    // static const unsigned char neg_pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0xbf,0x00,0x00,0x00,0x00,0x00,0x00};
    // Cutoff value for using this argument reduction technique
    //static const double    pi_2_minus_epsilon =  1.564660403643354;
    //static const double neg_pi_2_plus_epsilon = -1.564660403643354;

    // Pseudocode for sin:
    // if (x <= Math.PI / 4.0) {
    //   if (x >= -Math.PI / 4.0) return  fsin(x);
    //   if (x >= -Math.PI / 2.0) return -fcos(x + Math.PI / 2.0);
    // } else {
    //   if (x <=  Math.PI / 2.0) return  fcos(x - Math.PI / 2.0);
    // }
    // return StrictMath.sin(x);

    // Pseudocode for cos:
    // if (x <= Math.PI / 4.0) {
    //   if (x >= -Math.PI / 4.0) return  fcos(x);
    //   if (x >= -Math.PI / 2.0) return  fsin(x + Math.PI / 2.0);
    // } else {
    //   if (x <=  Math.PI / 2.0) return -fsin(x - Math.PI / 2.0);
    // }
    // return StrictMath.cos(x);

    // Actually, sticking in an 80-bit Intel value into C2 will be tough; it
    // requires a special machine instruction to load it.  Instead we'll try
    // the 'easy' case.  If we really need the extra range +/- PI/2 we'll
    // probably do the math inside the SIN encoding.
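
    // The reductions in the pseudocode rely on the shift identities
    // sin(x) = -cos(x + pi/2) and cos(x) = sin(x + pi/2) (with the mirrored
    // forms for x - pi/2), which move any |x| <= pi/2 into [-pi/4, pi/4],
    // where the hardware fsin/fcos results are used directly.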
1272
1273    // Make the merge point
1274    RegionNode *r = new (C, 3) RegionNode(3);
1275    Node *phi = new (C, 3) PhiNode(r,Type::DOUBLE);
1276
1277    // Flatten arg so we need only 1 test
1278    Node *abs = _gvn.transform(new (C, 2) AbsDNode(arg));
1279    // Node for PI/4 constant
1280    Node *pi4 = makecon(TypeD::make(pi_4));
1281    // Check PI/4 : abs(arg)
1282    Node *cmp = _gvn.transform(new (C, 3) CmpDNode(pi4,abs));
1283    // Check: If PI/4 < abs(arg) then go slow
1284    Node *bol = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::lt ) );
1285    // Branch either way
1286    IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
1287    set_control(opt_iff(r,iff));
1288
1289    // Set fast path result
1290    phi->init_req(2,trig);
1291
1292    // Slow path - non-blocking leaf call
1293    Node* call = NULL;
1294    switch (id) {
1295    case vmIntrinsics::_dsin:
1296      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1297                               CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
1298                               "Sin", NULL, arg, top());
1299      break;
1300    case vmIntrinsics::_dcos:
1301      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1302                               CAST_FROM_FN_PTR(address, SharedRuntime::dcos),
1303                               "Cos", NULL, arg, top());
1304      break;
1305    case vmIntrinsics::_dtan:
1306      call = make_runtime_call(RC_LEAF, OptoRuntime::Math_D_D_Type(),
1307                               CAST_FROM_FN_PTR(address, SharedRuntime::dtan),
1308                               "Tan", NULL, arg, top());
1309      break;
1310    }
1311    assert(control()->in(0) == call, "");
1312    Node* slow_result = _gvn.transform(new (C, 1) ProjNode(call,TypeFunc::Parms));
1313    r->init_req(1,control());
1314    phi->init_req(1,slow_result);
1315
1316    // Post-merge
1317    set_control(_gvn.transform(r));
1318    record_for_igvn(r);
1319    trig = _gvn.transform(phi);
1320
1321    C->set_has_split_ifs(true); // Has chance for split-if optimization
1322  }
1323  // Push result back on JVM stack
1324  push_pair(trig);
1325  return true;
1326}
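// [Editorial note] A minimal stand-alone sketch (not part of this file) of the
// fast/slow diamond built above.  Here `fast_fsin` is a hypothetical stand-in
// for the x87-style FSIN instruction, trusted only on [-pi/4, pi/4], and
// std::sin stands in for the SharedRuntime::dsin leaf call on the slow path.
/*
#include <cmath>

static double fast_fsin(double x) { return std::sin(x); }  // hardware stand-in

double sin_with_argument_reduction(double x) {
  static const double pi_4 = 0.7853981633974483;
  if (std::fabs(x) <= pi_4)          // the AbsD + CmpD(pi4, abs) guard
    return fast_fsin(x);             // fast path: phi slot 2
  return std::sin(x);                // slow path: RC_LEAF runtime call, phi slot 1
}
*/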
1327
1328//------------------------------inline_sqrt-------------------------------------
1329// Inline square root instruction, if possible.
1330bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) {
1331  assert(id == vmIntrinsics::_dsqrt, "Not square root");
1332  _sp += arg_size();        // restore stack pointer
1333  push_pair(_gvn.transform(new (C, 2) SqrtDNode(0, pop_math_arg())));
1334  return true;
1335}
1336
1337//------------------------------inline_abs-------------------------------------
1338// Inline absolute value instruction, if possible.
1339bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) {
1340  assert(id == vmIntrinsics::_dabs, "Not absolute value");
1341  _sp += arg_size();        // restore stack pointer
1342  push_pair(_gvn.transform(new (C, 2) AbsDNode(pop_math_arg())));
1343  return true;
1344}
1345
1346//------------------------------inline_exp-------------------------------------
1347// Inline exp instructions, if possible.  The Intel hardware only misses
1348// really odd corner cases (+/- Infinity).  Just uncommon-trap them.
1349bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) {
1350  assert(id == vmIntrinsics::_dexp, "Not exp");
1351
1352  // If this inlining ever returned NaN in the past, we do not intrinsify it
1353  // ever again.  NaN results require StrictMath.exp handling.
1354  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
1355
1356  // Do not intrinsify on older platforms which lack cmove.
1357  if (ConditionalMoveLimit == 0)  return false;
1358
1359  _sp += arg_size();        // restore stack pointer
1360  Node *x = pop_math_arg();
1361  Node *result = _gvn.transform(new (C, 2) ExpDNode(0,x));
1362
1363  //-------------------
1364  // result = result.isNaN() ? StrictMath.exp(x) : result;
1365  // Detect NaN by checking result != result; if so, fall back to StrictMath
1366  Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result));
1367  // Build the boolean node
1368  Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) );
1369
1370  { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1371    // End the current control-flow path
1372    push_pair(x);
1373    // Math.exp intrinsic returned a NaN, which requires StrictMath.exp
1374    // to handle.  Recompile without intrinsifying Math.exp
1375    uncommon_trap(Deoptimization::Reason_intrinsic,
1376                  Deoptimization::Action_make_not_entrant);
1377  }
1378
1379  C->set_has_split_ifs(true); // Has chance for split-if optimization
1380
1381  push_pair(result);
1382
1383  return true;
1384}
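// [Editorial note] The cutout above exploits the IEEE-754 rule that NaN is the
// only value unequal to itself, which is exactly what CmpD(result, result)
// tests.  A stand-alone illustration (not part of this file):
/*
bool returned_nan(double result) {
  return result != result;  // true iff result is NaN; triggers the uncommon trap
}
*/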
1385
1386//------------------------------inline_pow-------------------------------------
1387// Inline power instructions, if possible.
1388bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
1389  assert(id == vmIntrinsics::_dpow, "Not pow");
1390
1391  // If this inlining ever returned NaN in the past, we do not intrinsify it
1392  // every again.  NaN results requires StrictMath.pow handling.
1393  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
1394
1395  // Do not intrinsify on older platforms which lack cmove.
1396  if (ConditionalMoveLimit == 0)  return false;
1397
1398  // Pseudocode for pow
1399  // if (x <= 0.0) {
1400  //   if ((double)((int)y)==y) { // if y is int
1401  //     result = ((1&(int)y)==0)? DPow(abs(x), y):-DPow(abs(x), y)
1402  //   } else {
1403  //     result = NaN;
1404  //   }
1405  // } else {
1406  //   result = DPow(x,y);
1407  // }
1408  // if (result != result)?  {
1409  //   uncommon_trap();
1410  // }
1411  // return result;
1412
1413  _sp += arg_size();        // restore stack pointer
1414  Node* y = pop_math_arg();
1415  Node* x = pop_math_arg();
1416
1417  Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) );
1418
1419  // Short form: if not top-level (i.e., Math.pow but inlining Math.pow
1420  // inside of something) then skip the fancy tests and just check for
1421  // NaN result.
1422  Node *result = NULL;
1423  if( jvms()->depth() >= 1 ) {
1424    result = fast_result;
1425  } else {
1426
1427    // Set the merge point for If node with condition of (x <= 0.0)
1428    // There are four possible paths to region node and phi node
1429    RegionNode *r = new (C, 4) RegionNode(4);
1430    Node *phi = new (C, 4) PhiNode(r, Type::DOUBLE);
1431
1432    // Build the first if node: if (x <= 0.0)
1433    // Node for 0 constant
1434    Node *zeronode = makecon(TypeD::ZERO);
1435    // Check x:0
1436    Node *cmp = _gvn.transform(new (C, 3) CmpDNode(x, zeronode));
1437    // Check: If (x<=0) then go complex path
1438    Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::le ) );
1439    // Branch either way
1440    IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1441    Node *opt_test = _gvn.transform(if1);
1442    //assert( opt_test->is_If(), "Expect an IfNode");
1443    IfNode *opt_if1 = (IfNode*)opt_test;
1444    // Fast path taken; set region slot 3
1445    Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_if1) );
1446    r->init_req(3,fast_taken); // Capture fast-control
1447
1448    // Fast path not-taken, i.e. slow path
1449    Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(opt_if1) );
1450
1451    // Set fast path result
1452    Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) );
1453    phi->init_req(3, fast_result);
1454
1455    // Complex path
1456    // Build the second if node (if y is int)
1457    // Node for (int)y
1458    Node *inty = _gvn.transform( new (C, 2) ConvD2INode(y));
1459    // Node for (double)((int) y)
1460    Node *doubleinty= _gvn.transform( new (C, 2) ConvI2DNode(inty));
1461    // Check (double)((int) y) : y
1462    Node *cmpinty= _gvn.transform(new (C, 3) CmpDNode(doubleinty, y));
1463    // Check if (y isn't int) then go to slow path
1464
1465    Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) );
1466    // Branch either way
1467    IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
1468    Node *slow_path = opt_iff(r,if2); // Set region path 2
1469
1470    // Calculate DPow(abs(x), y)*(1 & (int)y)
1471    // Node for constant 1
1472    Node *conone = intcon(1);
1473    // 1& (int)y
1474    Node *signnode= _gvn.transform( new (C, 3) AndINode(conone, inty) );
1475    // zero node
1476    Node *conzero = intcon(0);
1477    // Check (1&(int)y)==0?
1478    Node *cmpeq1 = _gvn.transform(new (C, 3) CmpINode(signnode, conzero));
1479    // Check if (1&(int)y)!=0?, if so the result is negative
1480    Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmpeq1, BoolTest::ne ) );
1481    // abs(x)
1482    Node *absx=_gvn.transform( new (C, 2) AbsDNode(x));
1483    // abs(x)^y
1484    Node *absxpowy = _gvn.transform( new (C, 3) PowDNode(0, absx, y) );
1485    // -abs(x)^y
1486    Node *negabsxpowy = _gvn.transform(new (C, 2) NegDNode (absxpowy));
1487    // (1&(int)y)==1?-DPow(abs(x), y):DPow(abs(x), y)
1488    Node *signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE));
1489    // Set complex path fast result
1490    phi->init_req(2, signresult);
1491
1492    static const jlong nan_bits = CONST64(0x7ff8000000000000);
1493    Node *slow_result = makecon(TypeD::make(*(double*)&nan_bits)); // return NaN
1494    r->init_req(1,slow_path);
1495    phi->init_req(1,slow_result);
1496
1497    // Post merge
1498    set_control(_gvn.transform(r));
1499    record_for_igvn(r);
1500    result=_gvn.transform(phi);
1501  }
1502
1503  //-------------------
1504  // result = result.isNaN() ? uncommon_trap() : result;
1505  // Detect NaN by checking result != result; if so, fall back to StrictMath
1506  Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result));
1507  // Build the boolean node
1508  Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) );
1509
1510  { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
1511    // End the current control-flow path
1512    push_pair(x);
1513    push_pair(y);
1514    // Math.pow intrinsic returned a NaN, which requires StrictMath.pow
1515    // to handle.  Recompile without intrinsifying Math.pow.
1516    uncommon_trap(Deoptimization::Reason_intrinsic,
1517                  Deoptimization::Action_make_not_entrant);
1518  }
1519
1520  C->set_has_split_ifs(true); // Has chance for split-if optimization
1521
1522  push_pair(result);
1523
1524  return true;
1525}
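// [Editorial note] The pseudocode above, written out as compilable C++ (an
// editorial sketch, not part of this file).  std::pow stands in for the PowD
// node / SharedRuntime::dpow, and quiet_NaN() for the nan_bits constant.
/*
#include <cmath>
#include <limits>

double pow_sketch(double x, double y) {
  if (x <= 0.0) {
    int iy = (int) y;   // Java's ConvD2I saturates; the C++ cast is sketch-only
    if ((double) iy == y) {                    // y is an integer value
      double r = std::pow(std::fabs(x), y);    // DPow(abs(x), y)
      return ((iy & 1) != 0) ? -r : r;         // odd exponent => negate
    }
    return std::numeric_limits<double>::quiet_NaN();  // negative base, non-int y
  }
  return std::pow(x, y);                       // ordinary fast path
}
*/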
1526
1527//------------------------------inline_trans-------------------------------------
1528// Inline transcendental instructions, if possible.  The Intel hardware gets
1529// these right; no funny corner cases are missed.
1530bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) {
1531  _sp += arg_size();        // restore stack pointer
1532  Node* arg = pop_math_arg();
1533  Node* trans = NULL;
1534
1535  switch (id) {
1536  case vmIntrinsics::_dlog:
1537    trans = _gvn.transform((Node*)new (C, 2) LogDNode(arg));
1538    break;
1539  case vmIntrinsics::_dlog10:
1540    trans = _gvn.transform((Node*)new (C, 2) Log10DNode(arg));
1541    break;
1542  default:
1543    assert(false, "bad intrinsic was passed in");
1544    return false;
1545  }
1546
1547  // Push result back on JVM stack
1548  push_pair(trans);
1549  return true;
1550}
1551
1552//------------------------------runtime_math-----------------------------
1553bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
1554  Node* a = NULL;
1555  Node* b = NULL;
1556
1557  assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
1558         "must be (DD)D or (D)D type");
1559
1560  // Inputs
1561  _sp += arg_size();        // restore stack pointer
1562  if (call_type == OptoRuntime::Math_DD_D_Type()) {
1563    b = pop_math_arg();
1564  }
1565  a = pop_math_arg();
1566
1567  const TypePtr* no_memory_effects = NULL;
1568  Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
1569                                 no_memory_effects,
1570                                 a, top(), b, b ? top() : NULL);
1571  Node* value = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+0));
1572#ifdef ASSERT
1573  Node* value_top = _gvn.transform(new (C, 1) ProjNode(trig, TypeFunc::Parms+1));
1574  assert(value_top == top(), "second value must be top");
1575#endif
1576
1577  push_pair(value);
1578  return true;
1579}
1580
1581//------------------------------inline_math_native-----------------------------
1582bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
1583  switch (id) {
1584    // These intrinsics are not properly supported on all hardware
1585  case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
1586    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
1587  case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
1588    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
1589  case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
1590    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
1591
1592  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) :
1593    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
1594  case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) :
1595    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
1596
1597    // These intrinsics are supported on all hardware
1598  case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_sqrt(id) : false;
1599  case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Op_AbsD)  ? inline_abs(id)  : false;
1600
1601    // These intrinsics don't work on X86.  The ad implementation doesn't
1602    // handle NaNs properly.  Instead of returning infinity, the ad
1603    // implementation returns a NaN on overflow. See bug: 6304089
1604    // Once the ad implementations are fixed, change the code below
1605    // to match the intrinsics above
1606
1607  case vmIntrinsics::_dexp:  return
1608    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
1609  case vmIntrinsics::_dpow:  return
1610    runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
1611
1612   // These intrinsics are not yet correctly implemented
1613  case vmIntrinsics::_datan2:
1614    return false;
1615
1616  default:
1617    ShouldNotReachHere();
1618    return false;
1619  }
1620}
1621
1622static bool is_simple_name(Node* n) {
1623  return (n->req() == 1         // constant
1624          || (n->is_Type() && n->as_Type()->type()->singleton())
1625          || n->is_Proj()       // parameter or return value
1626          || n->is_Phi()        // local of some sort
1627          );
1628}
1629
1630//----------------------------inline_min_max-----------------------------------
1631bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
1632  push(generate_min_max(id, argument(0), argument(1)));
1633
1634  return true;
1635}
1636
1637Node*
1638LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) {
1639  // These are the candidate return values:
1640  Node* xvalue = x0;
1641  Node* yvalue = y0;
1642
1643  if (xvalue == yvalue) {
1644    return xvalue;
1645  }
1646
1647  bool want_max = (id == vmIntrinsics::_max);
1648
1649  const TypeInt* txvalue = _gvn.type(xvalue)->isa_int();
1650  const TypeInt* tyvalue = _gvn.type(yvalue)->isa_int();
1651  if (txvalue == NULL || tyvalue == NULL)  return top();
1652  // This is not really necessary, but it is consistent with a
1653  // hypothetical MaxINode::Value method:
1654  int widen = MAX2(txvalue->_widen, tyvalue->_widen);
1655
1656  // %%% This folding logic should (ideally) be in a different place.
1657  // Some should be inside IfNode, and there should be a more reliable
1658  // transformation of ?: style patterns into cmoves.  We also want
1659  // more powerful optimizations around cmove and min/max.
1660
1661  // Try to find a dominating comparison of these guys.
1662  // It can simplify the index computation for Arrays.copyOf
1663  // and similar uses of System.arraycopy.
1664  // First, compute the normalized version of CmpI(x, y).
1665  int   cmp_op = Op_CmpI;
1666  Node* xkey = xvalue;
1667  Node* ykey = yvalue;
1668  Node* ideal_cmpxy = _gvn.transform( new(C, 3) CmpINode(xkey, ykey) );
1669  if (ideal_cmpxy->is_Cmp()) {
1670    // E.g., if we have CmpI(length - offset, count),
1671    // it might idealize to CmpI(length, count + offset)
1672    cmp_op = ideal_cmpxy->Opcode();
1673    xkey = ideal_cmpxy->in(1);
1674    ykey = ideal_cmpxy->in(2);
1675  }
1676
1677  // Start by locating any relevant comparisons.
1678  Node* start_from = (xkey->outcnt() < ykey->outcnt()) ? xkey : ykey;
1679  Node* cmpxy = NULL;
1680  Node* cmpyx = NULL;
1681  for (DUIterator_Fast kmax, k = start_from->fast_outs(kmax); k < kmax; k++) {
1682    Node* cmp = start_from->fast_out(k);
1683    if (cmp->outcnt() > 0 &&            // must have prior uses
1684        cmp->in(0) == NULL &&           // must be context-independent
1685        cmp->Opcode() == cmp_op) {      // right kind of compare
1686      if (cmp->in(1) == xkey && cmp->in(2) == ykey)  cmpxy = cmp;
1687      if (cmp->in(1) == ykey && cmp->in(2) == xkey)  cmpyx = cmp;
1688    }
1689  }
1690
1691  const int NCMPS = 2;
1692  Node* cmps[NCMPS] = { cmpxy, cmpyx };
1693  int cmpn;
1694  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
1695    if (cmps[cmpn] != NULL)  break;     // find a result
1696  }
1697  if (cmpn < NCMPS) {
1698    // Look for a dominating test that tells us the min and max.
1699    int depth = 0;                // Limit search depth for speed
1700    Node* dom = control();
1701    for (; dom != NULL; dom = IfNode::up_one_dom(dom, true)) {
1702      if (++depth >= 100)  break;
1703      Node* ifproj = dom;
1704      if (!ifproj->is_Proj())  continue;
1705      Node* iff = ifproj->in(0);
1706      if (!iff->is_If())  continue;
1707      Node* bol = iff->in(1);
1708      if (!bol->is_Bool())  continue;
1709      Node* cmp = bol->in(1);
1710      if (cmp == NULL)  continue;
1711      for (cmpn = 0; cmpn < NCMPS; cmpn++)
1712        if (cmps[cmpn] == cmp)  break;
1713      if (cmpn == NCMPS)  continue;
1714      BoolTest::mask btest = bol->as_Bool()->_test._test;
1715      if (ifproj->is_IfFalse())  btest = BoolTest(btest).negate();
1716      if (cmp->in(1) == ykey)    btest = BoolTest(btest).commute();
1717      // At this point, we know that 'x btest y' is true.
1718      switch (btest) {
1719      case BoolTest::eq:
1720        // They are proven equal, so we can collapse the min/max.
1721        // Either value is the answer.  Choose the simpler.
1722        if (is_simple_name(yvalue) && !is_simple_name(xvalue))
1723          return yvalue;
1724        return xvalue;
1725      case BoolTest::lt:          // x < y
1726      case BoolTest::le:          // x <= y
1727        return (want_max ? yvalue : xvalue);
1728      case BoolTest::gt:          // x > y
1729      case BoolTest::ge:          // x >= y
1730        return (want_max ? xvalue : yvalue);
1731      }
1732    }
1733  }
1734
1735  // We failed to find a dominating test.
1736  // Let's pick a test that might GVN with prior tests.
1737  Node*          best_bol   = NULL;
1738  BoolTest::mask best_btest = BoolTest::illegal;
1739  for (cmpn = 0; cmpn < NCMPS; cmpn++) {
1740    Node* cmp = cmps[cmpn];
1741    if (cmp == NULL)  continue;
1742    for (DUIterator_Fast jmax, j = cmp->fast_outs(jmax); j < jmax; j++) {
1743      Node* bol = cmp->fast_out(j);
1744      if (!bol->is_Bool())  continue;
1745      BoolTest::mask btest = bol->as_Bool()->_test._test;
1746      if (btest == BoolTest::eq || btest == BoolTest::ne)  continue;
1747      if (cmp->in(1) == ykey)   btest = BoolTest(btest).commute();
1748      if (bol->outcnt() > (best_bol == NULL ? 0 : best_bol->outcnt())) {
1749        best_bol   = bol->as_Bool();
1750        best_btest = btest;
1751      }
1752    }
1753  }
1754
1755  Node* answer_if_true  = NULL;
1756  Node* answer_if_false = NULL;
1757  switch (best_btest) {
1758  default:
1759    if (cmpxy == NULL)
1760      cmpxy = ideal_cmpxy;
1761    best_bol = _gvn.transform( new(C, 2) BoolNode(cmpxy, BoolTest::lt) );
1762    // and fall through:
1763  case BoolTest::lt:          // x < y
1764  case BoolTest::le:          // x <= y
1765    answer_if_true  = (want_max ? yvalue : xvalue);
1766    answer_if_false = (want_max ? xvalue : yvalue);
1767    break;
1768  case BoolTest::gt:          // x > y
1769  case BoolTest::ge:          // x >= y
1770    answer_if_true  = (want_max ? xvalue : yvalue);
1771    answer_if_false = (want_max ? yvalue : xvalue);
1772    break;
1773  }
1774
1775  jint hi, lo;
1776  if (want_max) {
1777    // We can sharpen the minimum.
1778    hi = MAX2(txvalue->_hi, tyvalue->_hi);
1779    lo = MAX2(txvalue->_lo, tyvalue->_lo);
1780  } else {
1781    // We can sharpen the maximum.
1782    hi = MIN2(txvalue->_hi, tyvalue->_hi);
1783    lo = MIN2(txvalue->_lo, tyvalue->_lo);
1784  }
1785
1786  // Use a flow-free graph structure, to avoid creating excess control edges
1787  // which could hinder other optimizations.
1788  // Since Math.min/max is often used with arraycopy, we want
1789  // tightly_coupled_allocation to be able to see beyond min/max expressions.
1790  Node* cmov = CMoveNode::make(C, NULL, best_bol,
1791                               answer_if_false, answer_if_true,
1792                               TypeInt::make(lo, hi, widen));
1793
1794  return _gvn.transform(cmov);
1795
1796  /*
1797  // This is not as desirable as it may seem, since Min and Max
1798  // nodes do not have a full set of optimizations.
1799  // And they would interfere, anyway, with 'if' optimizations
1800  // and with CMoveI canonical forms.
1801  switch (id) {
1802  case vmIntrinsics::_min:
1803    result_val = _gvn.transform(new (C, 3) MinINode(x,y)); break;
1804  case vmIntrinsics::_max:
1805    result_val = _gvn.transform(new (C, 3) MaxINode(x,y)); break;
1806  default:
1807    ShouldNotReachHere();
1808  }
1809  */
1810}
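// [Editorial note] The flow-free form chosen above corresponds to lowering
// min/max through a conditional move rather than a branch; a stand-alone
// equivalent (editorial sketch, not part of this file):
/*
int min_via_cmove(int x, int y) {
  bool x_lt_y = (x < y);       // BoolNode(CmpI(x, y), BoolTest::lt)
  return x_lt_y ? x : y;       // CMoveI: answer_if_true = x, answer_if_false = y
}
*/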
1811
1812inline int
1813LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
1814  const TypePtr* base_type = TypePtr::NULL_PTR;
1815  if (base != NULL)  base_type = _gvn.type(base)->isa_ptr();
1816  if (base_type == NULL) {
1817    // Unknown type.
1818    return Type::AnyPtr;
1819  } else if (base_type == TypePtr::NULL_PTR) {
1820    // Since this is a NULL+long form, we have to switch to a rawptr.
1821    base   = _gvn.transform( new (C, 2) CastX2PNode(offset) );
1822    offset = MakeConX(0);
1823    return Type::RawPtr;
1824  } else if (base_type->base() == Type::RawPtr) {
1825    return Type::RawPtr;
1826  } else if (base_type->isa_oopptr()) {
1827    // Base is never null => always a heap address.
1828    if (base_type->ptr() == TypePtr::NotNull) {
1829      return Type::OopPtr;
1830    }
1831    // Offset is small => always a heap address.
1832    const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t();
1833    if (offset_type != NULL &&
1834        base_type->offset() == 0 &&     // (should always be?)
1835        offset_type->_lo >= 0 &&
1836        !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
1837      return Type::OopPtr;
1838    }
1839    // Otherwise, it might either be oop+off or NULL+addr.
1840    return Type::AnyPtr;
1841  } else {
1842    // No information:
1843    return Type::AnyPtr;
1844  }
1845}
1846
1847inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
1848  int kind = classify_unsafe_addr(base, offset);
1849  if (kind == Type::RawPtr) {
1850    return basic_plus_adr(top(), base, offset);
1851  } else {
1852    return basic_plus_adr(base, offset);
1853  }
1854}
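// [Editorial note] A stand-alone sketch (not part of this file) of the one
// unconditional rewrite in classify_unsafe_addr: when the Java caller passed
// (null, absoluteAddress), the long offset *is* the machine pointer, so the
// pair is rebased to (pointer, 0) and treated as a raw address.
/*
#include <cstdint>

void rebase_null_plus_long(uintptr_t& base, intptr_t& offset) {
  if (base == 0) {                  // the NULL+long form
    base   = (uintptr_t) offset;    // CastX2P analog
    offset = 0;                     // MakeConX(0) analog
  }
}
*/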
1855
1856//-------------------inline_numberOfLeadingZeros_int/long-----------------------
1857// inline int Integer.numberOfLeadingZeros(int)
1858// inline int Long.numberOfLeadingZeros(long)
1859bool LibraryCallKit::inline_numberOfLeadingZeros(vmIntrinsics::ID id) {
1860  assert(id == vmIntrinsics::_numberOfLeadingZeros_i || id == vmIntrinsics::_numberOfLeadingZeros_l, "not numberOfLeadingZeros");
1861  if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false;
1862  if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false;
1863  _sp += arg_size();  // restore stack pointer
1864  switch (id) {
1865  case vmIntrinsics::_numberOfLeadingZeros_i:
1866    push(_gvn.transform(new (C, 2) CountLeadingZerosINode(pop())));
1867    break;
1868  case vmIntrinsics::_numberOfLeadingZeros_l:
1869    push(_gvn.transform(new (C, 2) CountLeadingZerosLNode(pop_pair())));
1870    break;
1871  default:
1872    ShouldNotReachHere();
1873  }
1874  return true;
1875}
1876
1877//-------------------inline_numberOfTrailingZeros_int/long----------------------
1878// inline int Integer.numberOfTrailingZeros(int)
1879// inline int Long.numberOfTrailingZeros(long)
1880bool LibraryCallKit::inline_numberOfTrailingZeros(vmIntrinsics::ID id) {
1881  assert(id == vmIntrinsics::_numberOfTrailingZeros_i || id == vmIntrinsics::_numberOfTrailingZeros_l, "not numberOfTrailingZeros");
1882  if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false;
1883  if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false;
1884  _sp += arg_size();  // restore stack pointer
1885  switch (id) {
1886  case vmIntrinsics::_numberOfTrailingZeros_i:
1887    push(_gvn.transform(new (C, 2) CountTrailingZerosINode(pop())));
1888    break;
1889  case vmIntrinsics::_numberOfTrailingZeros_l:
1890    push(_gvn.transform(new (C, 2) CountTrailingZerosLNode(pop_pair())));
1891    break;
1892  default:
1893    ShouldNotReachHere();
1894  }
1895  return true;
1896}
1897
1898//----------------------------inline_bitCount_int/long-----------------------
1899// inline int Integer.bitCount(int)
1900// inline int Long.bitCount(long)
1901bool LibraryCallKit::inline_bitCount(vmIntrinsics::ID id) {
1902  assert(id == vmIntrinsics::_bitCount_i || id == vmIntrinsics::_bitCount_l, "not bitCount");
1903  if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false;
1904  if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false;
1905  _sp += arg_size();  // restore stack pointer
1906  switch (id) {
1907  case vmIntrinsics::_bitCount_i:
1908    push(_gvn.transform(new (C, 2) PopCountINode(pop())));
1909    break;
1910  case vmIntrinsics::_bitCount_l:
1911    push(_gvn.transform(new (C, 2) PopCountLNode(pop_pair())));
1912    break;
1913  default:
1914    ShouldNotReachHere();
1915  }
1916  return true;
1917}
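// [Editorial note] When no Op_PopCountI/L match rule exists, the Java code runs
// unintrinsified; the semantics the PopCount nodes implement are simply
// (editorial sketch, not part of this file):
/*
int bit_count_sketch(unsigned int v) {
  int n = 0;
  while (v != 0) { v &= v - 1; n++; }  // clears the lowest set bit each pass
  return n;
}
*/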
1918
1919//----------------------------inline_reverseBytes_int/long-------------------
1920// inline Integer.reverseBytes(int)
1921// inline Long.reverseBytes(long)
1922bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
1923  assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverseBytes");
1924  if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
1925  if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false;
1926  _sp += arg_size();        // restore stack pointer
1927  switch (id) {
1928  case vmIntrinsics::_reverseBytes_i:
1929    push(_gvn.transform(new (C, 2) ReverseBytesINode(0, pop())));
1930    break;
1931  case vmIntrinsics::_reverseBytes_l:
1932    push_pair(_gvn.transform(new (C, 2) ReverseBytesLNode(0, pop_pair())));
1933    break;
1934  default:
1935    ShouldNotReachHere();
1936  }
1937  return true;
1938}
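// [Editorial note] The byte swap computed by ReverseBytesI, as a stand-alone
// sketch (not part of this file):
/*
unsigned int reverse_bytes_sketch(unsigned int x) {
  return ((x & 0x000000ffU) << 24) |
         ((x & 0x0000ff00U) <<  8) |
         ((x & 0x00ff0000U) >>  8) |
         ((x & 0xff000000U) >> 24);
}
*/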
1939
1940//----------------------------inline_unsafe_access----------------------------
1941
1942const static BasicType T_ADDRESS_HOLDER = T_LONG;
1943
1944// Interpret Unsafe.fieldOffset cookies correctly:
1945extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
1946
1947bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
1948  if (callee()->is_static())  return false;  // caller must have the capability!
1949
1950#ifndef PRODUCT
1951  {
1952    ResourceMark rm;
1953    // Check the signatures.
1954    ciSignature* sig = signature();
1955#ifdef ASSERT
1956    if (!is_store) {
1957      // Object getObject(Object base, int/long offset), etc.
1958      BasicType rtype = sig->return_type()->basic_type();
1959      if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name())
1960          rtype = T_ADDRESS;  // it is really a C void*
1961      assert(rtype == type, "getter must return the expected value");
1962      if (!is_native_ptr) {
1963        assert(sig->count() == 2, "oop getter has 2 arguments");
1964        assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
1965        assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
1966      } else {
1967        assert(sig->count() == 1, "native getter has 1 argument");
1968        assert(sig->type_at(0)->basic_type() == T_LONG, "getter base is long");
1969      }
1970    } else {
1971      // void putObject(Object base, int/long offset, Object x), etc.
1972      assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
1973      if (!is_native_ptr) {
1974        assert(sig->count() == 3, "oop putter has 3 arguments");
1975        assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
1976        assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
1977      } else {
1978        assert(sig->count() == 2, "native putter has 2 arguments");
1979        assert(sig->type_at(0)->basic_type() == T_LONG, "putter base is long");
1980      }
1981      BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
1982      if (vtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::putAddress_name())
1983        vtype = T_ADDRESS;  // it is really a C void*
1984      assert(vtype == type, "putter must accept the expected value");
1985    }
1986#endif // ASSERT
1987 }
1988#endif //PRODUCT
1989
1990  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
1991
1992  int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ];
1993
1994  // Argument words:  "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words
1995  int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0);
1996
1997  debug_only(int saved_sp = _sp);
1998  _sp += nargs;
1999
2000  Node* val;
2001  debug_only(val = (Node*)(uintptr_t)-1);
2002
2003
2004  if (is_store) {
2005    // Get the value being stored.  (Pop it first; it was pushed last.)
2006    switch (type) {
2007    case T_DOUBLE:
2008    case T_LONG:
2009    case T_ADDRESS:
2010      val = pop_pair();
2011      break;
2012    default:
2013      val = pop();
2014    }
2015  }
2016
2017  // Build address expression.  See the code in inline_unsafe_prefetch.
2018  Node *adr;
2019  Node *heap_base_oop = top();
2020  if (!is_native_ptr) {
2021    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2022    Node* offset = pop_pair();
2023    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2024    Node* base   = pop();
2025    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2026    // to be plain byte offsets, which are also the same as those accepted
2027    // by oopDesc::field_base.
2028    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2029           "fieldOffset must be byte-scaled");
2030    // 32-bit machines ignore the high half!
2031    offset = ConvL2X(offset);
2032    adr = make_unsafe_address(base, offset);
2033    heap_base_oop = base;
2034  } else {
2035    Node* ptr = pop_pair();
2036    // Adjust Java long to machine word:
2037    ptr = ConvL2X(ptr);
2038    adr = make_unsafe_address(NULL, ptr);
2039  }
2040
2041  // Pop receiver last:  it was pushed first.
2042  Node *receiver = pop();
2043
2044  assert(saved_sp == _sp, "must have correct argument count");
2045
2046  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2047
2048  // First guess at the value type.
2049  const Type *value_type = Type::get_const_basic_type(type);
2050
2051  // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
2052  // there was not enough information to nail it down.
2053  Compile::AliasType* alias_type = C->alias_type(adr_type);
2054  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2055
2056  // We will need memory barriers unless we can determine a unique
2057  // alias category for this reference.  (Note:  If for some reason
2058  // the barriers get omitted and the unsafe reference begins to "pollute"
2059  // the alias analysis of the rest of the graph, either Compile::can_alias
2060  // or Compile::must_alias will throw a diagnostic assert.)
2061  bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM);
2062
2063  if (!is_store && type == T_OBJECT) {
2064    // Attempt to infer a sharper value type from the offset and base type.
2065    ciKlass* sharpened_klass = NULL;
2066
2067    // See if it is an instance field, with an object type.
2068    if (alias_type->field() != NULL) {
2069      assert(!is_native_ptr, "native pointer op cannot use a java address");
2070      if (alias_type->field()->type()->is_klass()) {
2071        sharpened_klass = alias_type->field()->type()->as_klass();
2072      }
2073    }
2074
2075    // See if it is a narrow oop array.
2076    if (adr_type->isa_aryptr()) {
2077      if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes(type)) {
2078        const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
2079        if (elem_type != NULL) {
2080          sharpened_klass = elem_type->klass();
2081        }
2082      }
2083    }
2084
2085    if (sharpened_klass != NULL) {
2086      const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
2087
2088      // Sharpen the value type.
2089      value_type = tjp;
2090
2091#ifndef PRODUCT
2092      if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
2093        tty->print("  from base type:  ");   adr_type->dump();
2094        tty->print("  sharpened value: "); value_type->dump();
2095      }
2096#endif
2097    }
2098  }
2099
2100  // Null check on self without removing any arguments.  The argument
2101  // null check technically happens in the wrong place, which can lead to
2102  // invalid stack traces when the primitive is inlined into a method
2103  // which handles NullPointerExceptions.
2104  _sp += nargs;
2105  do_null_check(receiver, T_OBJECT);
2106  _sp -= nargs;
2107  if (stopped()) {
2108    return true;
2109  }
2110  // Heap pointers get a null-check from the interpreter,
2111  // as a courtesy.  However, this is not guaranteed by Unsafe,
2112  // and it is not possible to fully distinguish unintended nulls
2113  // from intended ones in this API.
2114
2115  if (is_volatile) {
2116    // We need to emit leading and trailing CPU membars (see below) in
2117    // addition to memory membars when is_volatile. This is a little
2118    // too strong, but avoids the need to insert per-alias-type
2119    // volatile membars (for stores; compare Parse::do_put_xxx), which
2120    // we cannot do effectively here because we probably only have a
2121    // rough approximation of type.
2122    need_mem_bar = true;
2123    // For Stores, place a memory ordering barrier now.
2124    if (is_store)
2125      insert_mem_bar(Op_MemBarRelease);
2126  }
2127
2128  // Memory barrier to prevent normal and 'unsafe' accesses from
2129  // bypassing each other.  Happens after null checks, so the
2130  // exception paths do not take memory state from the memory barrier,
2131  // so there are no problems making a strong assert about mixing users
2132  // of safe & unsafe memory.  Otherwise fails in a CTW of rt.jar
2133  // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
2134  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2135
2136  if (!is_store) {
2137    Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
2138    // load value and push onto stack
2139    switch (type) {
2140    case T_BOOLEAN:
2141    case T_CHAR:
2142    case T_BYTE:
2143    case T_SHORT:
2144    case T_INT:
2145    case T_FLOAT:
2146    case T_OBJECT:
2147      push( p );
2148      break;
2149    case T_ADDRESS:
2150      // Cast to an int type.
2151      p = _gvn.transform( new (C, 2) CastP2XNode(NULL,p) );
2152      p = ConvX2L(p);
2153      push_pair(p);
2154      break;
2155    case T_DOUBLE:
2156    case T_LONG:
2157      push_pair( p );
2158      break;
2159    default: ShouldNotReachHere();
2160    }
2161  } else {
2162    // place effect of store into memory
2163    switch (type) {
2164    case T_DOUBLE:
2165      val = dstore_rounding(val);
2166      break;
2167    case T_ADDRESS:
2168      // Repackage the long as a pointer.
2169      val = ConvL2X(val);
2170      val = _gvn.transform( new (C, 2) CastX2PNode(val) );
2171      break;
2172    }
2173
2174    if (type != T_OBJECT ) {
2175      (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
2176    } else {
2177      // Possibly an oop being stored to Java heap or native memory
2178      if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
2179        // oop to Java heap.
2180        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
2181      } else {
2182        // We can't tell at compile time if we are storing in the Java heap or outside
2183        // of it. So we need to emit code to conditionally do the proper type of
2184        // store.
2185
2186        IdealKit kit(gvn(), control(),  merged_memory());
2187        kit.declares_done();
2188        // QQQ who knows what probability is here??
2189        kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
2190          (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
2191        } kit.else_(); {
2192          (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
2193        } kit.end_if();
2194      }
2195    }
2196  }
2197
2198  if (is_volatile) {
2199    if (!is_store)
2200      insert_mem_bar(Op_MemBarAcquire);
2201    else
2202      insert_mem_bar(Op_MemBarVolatile);
2203  }
2204
2205  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
2206
2207  return true;
2208}
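// [Editorial note] The barrier placement above for is_volatile is roughly the
// classic volatile mapping: a release barrier before a store, an acquire
// barrier after a load.  A loose C++11 analog follows (editorial sketch, not
// part of this file; the real JMM mapping is stronger than plain
// release/acquire):
/*
#include <atomic>

static std::atomic<int> g_field;

void volatile_store_sketch(int v) {
  g_field.store(v, std::memory_order_release);         // MemBarRelease + store
  std::atomic_thread_fence(std::memory_order_seq_cst); // trailing MemBarVolatile
}

int volatile_load_sketch() {
  return g_field.load(std::memory_order_acquire);      // load + MemBarAcquire
}
*/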
2209
2210//----------------------------inline_unsafe_prefetch----------------------------
2211
2212bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) {
2213#ifndef PRODUCT
2214  {
2215    ResourceMark rm;
2216    // Check the signatures.
2217    ciSignature* sig = signature();
2218#ifdef ASSERT
2219    // Object getObject(Object base, int/long offset), etc.
2220    BasicType rtype = sig->return_type()->basic_type();
2221    if (!is_native_ptr) {
2222      assert(sig->count() == 2, "oop prefetch has 2 arguments");
2223      assert(sig->type_at(0)->basic_type() == T_OBJECT, "prefetch base is object");
2224      assert(sig->type_at(1)->basic_type() == T_LONG, "prefetch offset is correct");
2225    } else {
2226      assert(sig->count() == 1, "native prefetch has 1 argument");
2227      assert(sig->type_at(0)->basic_type() == T_LONG, "prefetch base is long");
2228    }
2229#endif // ASSERT
2230  }
2231#endif // !PRODUCT
2232
2233  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2234
2235  // Argument words:  "this" if not static, plus (oop/offset) or (lo/hi) args
2236  int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3);
2237
2238  debug_only(int saved_sp = _sp);
2239  _sp += nargs;
2240
2241  // Build address expression.  See the code in inline_unsafe_access.
2242  Node *adr;
2243  if (!is_native_ptr) {
2244    // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
2245    Node* offset = pop_pair();
2246    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
2247    Node* base   = pop();
2248    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2249    // to be plain byte offsets, which are also the same as those accepted
2250    // by oopDesc::field_base.
2251    assert(Unsafe_field_offset_to_byte_offset(11) == 11,
2252           "fieldOffset must be byte-scaled");
2253    // 32-bit machines ignore the high half!
2254    offset = ConvL2X(offset);
2255    adr = make_unsafe_address(base, offset);
2256  } else {
2257    Node* ptr = pop_pair();
2258    // Adjust Java long to machine word:
2259    ptr = ConvL2X(ptr);
2260    adr = make_unsafe_address(NULL, ptr);
2261  }
2262
2263  if (is_static) {
2264    assert(saved_sp == _sp, "must have correct argument count");
2265  } else {
2266    // Pop receiver last:  it was pushed first.
2267    Node *receiver = pop();
2268    assert(saved_sp == _sp, "must have correct argument count");
2269
2270    // Null check on self without removing any arguments.  The argument
2271    // null check technically happens in the wrong place, which can lead to
2272    // invalid stack traces when the primitive is inlined into a method
2273    // which handles NullPointerExceptions.
2274    _sp += nargs;
2275    do_null_check(receiver, T_OBJECT);
2276    _sp -= nargs;
2277    if (stopped()) {
2278      return true;
2279    }
2280  }
2281
2282  // Generate the read or write prefetch
2283  Node *prefetch;
2284  if (is_store) {
2285    prefetch = new (C, 3) PrefetchWriteNode(i_o(), adr);
2286  } else {
2287    prefetch = new (C, 3) PrefetchReadNode(i_o(), adr);
2288  }
2289  prefetch->init_req(0, control());
2290  set_i_o(_gvn.transform(prefetch));
2291
2292  return true;
2293}
2294
2295//----------------------------inline_unsafe_CAS----------------------------
2296
2297bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
2298  // This basic scheme here is the same as inline_unsafe_access, but
2299  // differs in enough details that combining them would make the code
2300  // overly confusing.  (This is a true fact! I originally combined
2301  // them, but even I was confused by it!) As much code/comments as
2302  // possible are retained from inline_unsafe_access though to make
2303  // the correspondences clearer. - dl
2304
2305  if (callee()->is_static())  return false;  // caller must have the capability!
2306
2307#ifndef PRODUCT
2308  {
2309    ResourceMark rm;
2310    // Check the signatures.
2311    ciSignature* sig = signature();
2312#ifdef ASSERT
2313    BasicType rtype = sig->return_type()->basic_type();
2314    assert(rtype == T_BOOLEAN, "CAS must return boolean");
2315    assert(sig->count() == 4, "CAS has 4 arguments");
2316    assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
2317    assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
2318#endif // ASSERT
2319  }
2320#endif //PRODUCT
2321
2322  // number of stack slots per value argument (1 or 2)
2323  int type_words = type2size[type];
2324
2325  // Cannot inline wide CAS on machines that don't support it natively
2326  if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8())
2327    return false;
2328
2329  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2330
2331  // Argument words:  "this" plus oop plus offset plus oldvalue plus newvalue;
2332  int nargs = 1 + 1 + 2  + type_words + type_words;
2333
2334  // pop arguments: newval, oldval, offset, base, and receiver
2335  debug_only(int saved_sp = _sp);
2336  _sp += nargs;
2337  Node* newval   = (type_words == 1) ? pop() : pop_pair();
2338  Node* oldval   = (type_words == 1) ? pop() : pop_pair();
2339  Node *offset   = pop_pair();
2340  Node *base     = pop();
2341  Node *receiver = pop();
2342  assert(saved_sp == _sp, "must have correct argument count");
2343
2344  //  Null check receiver.
2345  _sp += nargs;
2346  do_null_check(receiver, T_OBJECT);
2347  _sp -= nargs;
2348  if (stopped()) {
2349    return true;
2350  }
2351
2352  // Build field offset expression.
2353  // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
2354  // to be plain byte offsets, which are also the same as those accepted
2355  // by oopDesc::field_base.
2356  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2357  // 32-bit machines ignore the high half of long offsets
2358  offset = ConvL2X(offset);
2359  Node* adr = make_unsafe_address(base, offset);
2360  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2361
2362  // (Unlike inline_unsafe_access, there seems to be no point in trying
2363  // to refine types.  Just use the coarse types here.)
2364  const Type *value_type = Type::get_const_basic_type(type);
2365  Compile::AliasType* alias_type = C->alias_type(adr_type);
2366  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
2367  int alias_idx = C->get_alias_index(adr_type);
2368
2369  // Memory-model-wise, a CAS acts like a little synchronized block,
2370  // so needs barriers on each side.  These don't translate into
2371  // actual barriers on most machines, but we still need the rest of
2372  // the compiler to respect ordering.
2373
2374  insert_mem_bar(Op_MemBarRelease);
2375  insert_mem_bar(Op_MemBarCPUOrder);
2376
2377  // 4984716: MemBars must be inserted before this
2378  //          memory node in order to avoid a false
2379  //          dependency which will confuse the scheduler.
2380  Node *mem = memory(alias_idx);
2381
2382  // For now, we handle only those cases that actually exist: ints,
2383  // longs, and Object. Adding others should be straightforward.
2384  Node* cas;
2385  switch(type) {
2386  case T_INT:
2387    cas = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval));
2388    break;
2389  case T_LONG:
2390    cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
2391    break;
2392  case T_OBJECT:
2393    // reference stores need a store barrier.
2394    // (They don't if CAS fails, but it isn't worth checking.)
2395    pre_barrier(control(), base, adr, alias_idx, newval, value_type->make_oopptr(), T_OBJECT);
2396#ifdef _LP64
2397    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2398      Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
2399      Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
2400      cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
2401                                                          newval_enc, oldval_enc));
2402    } else
2403#endif
2404    {
2405      cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
2406    }
2407    post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
2408    break;
2409  default:
2410    ShouldNotReachHere();
2411    break;
2412  }
2413
2414  // SCMemProjNodes represent the memory state of CAS. Their main
2415  // role is to prevent CAS nodes from being optimized away when their
2416  // results aren't used.
2417  Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
2418  set_memory(proj, alias_idx);
2419
2420  // Add the trailing membar surrounding the access
2421  insert_mem_bar(Op_MemBarCPUOrder);
2422  insert_mem_bar(Op_MemBarAcquire);
2423
2424  push(cas);
2425  return true;
2426}
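// [Editorial note] The "little synchronized block" semantics above, with
// barriers on both sides of the CAS, map loosely onto a sequentially
// consistent read-modify-write in C++11 (editorial sketch, not part of this
// file):
/*
#include <atomic>

bool cas_sketch(std::atomic<long>& cell, long expect, long update) {
  return cell.compare_exchange_strong(expect, update,
                                      std::memory_order_seq_cst);
}
*/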
2427
2428bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
2429  // This is another variant of inline_unsafe_access, differing in
2430  // that it always issues store-store ("release") barrier and ensures
2431  // store-atomicity (which only matters for "long").
2432
2433  if (callee()->is_static())  return false;  // caller must have the capability!
2434
2435#ifndef PRODUCT
2436  {
2437    ResourceMark rm;
2438    // Check the signatures.
2439    ciSignature* sig = signature();
2440#ifdef ASSERT
2441    BasicType rtype = sig->return_type()->basic_type();
2442    assert(rtype == T_VOID, "must return void");
2443    assert(sig->count() == 3, "has 3 arguments");
2444    assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object");
2445    assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long");
2446#endif // ASSERT
2447  }
2448#endif //PRODUCT
2449
2450  // number of stack slots per value argument (1 or 2)
2451  int type_words = type2size[type];
2452
2453  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
2454
2455  // Argument words:  "this" plus oop plus offset plus value;
2456  int nargs = 1 + 1 + 2 + type_words;
2457
2458  // pop arguments: val, offset, base, and receiver
2459  debug_only(int saved_sp = _sp);
2460  _sp += nargs;
2461  Node* val      = (type_words == 1) ? pop() : pop_pair();
2462  Node *offset   = pop_pair();
2463  Node *base     = pop();
2464  Node *receiver = pop();
2465  assert(saved_sp == _sp, "must have correct argument count");
2466
2467  //  Null check receiver.
2468  _sp += nargs;
2469  do_null_check(receiver, T_OBJECT);
2470  _sp -= nargs;
2471  if (stopped()) {
2472    return true;
2473  }
2474
2475  // Build field offset expression.
2476  assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
2477  // 32-bit machines ignore the high half of long offsets
2478  offset = ConvL2X(offset);
2479  Node* adr = make_unsafe_address(base, offset);
2480  const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
2481  const Type *value_type = Type::get_const_basic_type(type);
2482  Compile::AliasType* alias_type = C->alias_type(adr_type);
2483
2484  insert_mem_bar(Op_MemBarRelease);
2485  insert_mem_bar(Op_MemBarCPUOrder);
2486  // Ensure that the store is atomic for longs:
2487  bool require_atomic_access = true;
2488  Node* store;
2489  if (type == T_OBJECT) // reference stores need a store barrier.
2490    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
2491  else {
2492    store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
2493  }
2494  insert_mem_bar(Op_MemBarCPUOrder);
2495  return true;
2496}
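// [Editorial note] An ordered store (leading release barrier, no trailing
// acquire) is essentially a C++11 release store, the "lazySet" idiom
// (editorial sketch, not part of this file):
/*
#include <atomic>

static std::atomic<long> g_cell;

void ordered_store_sketch(long v) {
  g_cell.store(v, std::memory_order_release);  // MemBarRelease + atomic store
}
*/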
2497
2498bool LibraryCallKit::inline_unsafe_allocate() {
2499  if (callee()->is_static())  return false;  // caller must have the capability!
2500  int nargs = 1 + 1;
2501  assert(signature()->size() == nargs-1, "alloc has 1 argument");
2502  null_check_receiver(callee());  // check then ignore argument(0)
2503  _sp += nargs;  // set original stack for use by uncommon_trap
2504  Node* cls = do_null_check(argument(1), T_OBJECT);
2505  _sp -= nargs;
2506  if (stopped())  return true;
2507
2508  Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
2509  _sp += nargs;  // set original stack for use by uncommon_trap
2510  kls = do_null_check(kls, T_OBJECT);
2511  _sp -= nargs;
2512  if (stopped())  return true;  // argument was like int.class
2513
2514  // Note:  The argument might still be an illegal value like
2515  // Serializable.class or Object[].class.   The runtime will handle it.
2516  // But we must make an explicit check for initialization.
2517  Node* insp = basic_plus_adr(kls, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc));
2518  Node* inst = make_load(NULL, insp, TypeInt::INT, T_INT);
2519  Node* bits = intcon(instanceKlass::fully_initialized);
2520  Node* test = _gvn.transform( new (C, 3) SubINode(inst, bits) );
2521  // The 'test' is non-zero if we need to take a slow path.
2522
2523  Node* obj = new_instance(kls, test);
2524  push(obj);
2525
2526  return true;
2527}
2528
2529//------------------------inline_native_time_funcs--------------
2530// inline code for System.currentTimeMillis() and System.nanoTime()
2531// these have the same type and signature
2532bool LibraryCallKit::inline_native_time_funcs(bool isNano) {
2533  address funcAddr = isNano ? CAST_FROM_FN_PTR(address, os::javaTimeNanos) :
2534                              CAST_FROM_FN_PTR(address, os::javaTimeMillis);
2535  const char * funcName = isNano ? "nanoTime" : "currentTimeMillis";
2536  const TypeFunc *tf = OptoRuntime::current_time_millis_Type();
2537  const TypePtr* no_memory_effects = NULL;
2538  Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
2539  Node* value = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms+0));
2540#ifdef ASSERT
2541  Node* value_top = _gvn.transform(new (C, 1) ProjNode(time, TypeFunc::Parms + 1));
2542  assert(value_top == top(), "second value must be top");
2543#endif
2544  push_pair(value);
2545  return true;
2546}
2547
2548//------------------------inline_native_currentThread------------------
2549bool LibraryCallKit::inline_native_currentThread() {
2550  Node* junk = NULL;
2551  push(generate_current_thread(junk));
2552  return true;
2553}
2554
2555//------------------------inline_native_isInterrupted------------------
2556bool LibraryCallKit::inline_native_isInterrupted() {
2557  const int nargs = 1+1;  // receiver + boolean
2558  assert(nargs == arg_size(), "sanity");
2559  // Add a fast path to t.isInterrupted(clear_int):
2560  //   (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
2561  //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
2562  // So, in the common case that the interrupt bit is false,
2563  // we avoid making a call into the VM.  Even if the interrupt bit
2564  // is true, if the clear_int argument is false, we avoid the VM call.
2565  // However, if the receiver is not currentThread, we must call the VM,
2566  // because there must be some locking done around the operation.
2567
2568  // We only go to the fast case code if we pass two guards.
2569  // Paths which do not pass are accumulated in the slow_region.
2570  RegionNode* slow_region = new (C, 1) RegionNode(1);
2571  record_for_igvn(slow_region);
2572  RegionNode* result_rgn = new (C, 4) RegionNode(1+3); // fast1, fast2, slow
2573  PhiNode*    result_val = new (C, 4) PhiNode(result_rgn, TypeInt::BOOL);
2574  enum { no_int_result_path   = 1,
2575         no_clear_result_path = 2,
2576         slow_result_path     = 3
2577  };
2578
2579  // (a) Receiving thread must be the current thread.
2580  Node* rec_thr = argument(0);
2581  Node* tls_ptr = NULL;
2582  Node* cur_thr = generate_current_thread(tls_ptr);
2583  Node* cmp_thr = _gvn.transform( new (C, 3) CmpPNode(cur_thr, rec_thr) );
2584  Node* bol_thr = _gvn.transform( new (C, 2) BoolNode(cmp_thr, BoolTest::ne) );
2585
2586  bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO);
2587  if (!known_current_thread)
2588    generate_slow_guard(bol_thr, slow_region);
2589
2590  // (b) Interrupt bit on TLS must be false.
2591  Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
2592  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
2593  p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
2594  // Set the control input on the field _interrupted read to prevent it floating up.
2595  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
2596  Node* cmp_bit = _gvn.transform( new (C, 3) CmpINode(int_bit, intcon(0)) );
2597  Node* bol_bit = _gvn.transform( new (C, 2) BoolNode(cmp_bit, BoolTest::ne) );
2598
2599  IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
2600
2601  // First fast path:  if (!TLS._interrupted) return false;
2602  Node* false_bit = _gvn.transform( new (C, 1) IfFalseNode(iff_bit) );
2603  result_rgn->init_req(no_int_result_path, false_bit);
2604  result_val->init_req(no_int_result_path, intcon(0));
2605
2606  // drop through to next case
2607  set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_bit)) );
2608
2609  // (c) Or, if interrupt bit is set and clear_int is false, use 2nd fast path.
2610  Node* clr_arg = argument(1);
2611  Node* cmp_arg = _gvn.transform( new (C, 3) CmpINode(clr_arg, intcon(0)) );
2612  Node* bol_arg = _gvn.transform( new (C, 2) BoolNode(cmp_arg, BoolTest::ne) );
2613  IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN);
2614
2615  // Second fast path:  ... else if (!clear_int) return true;
2616  Node* false_arg = _gvn.transform( new (C, 1) IfFalseNode(iff_arg) );
2617  result_rgn->init_req(no_clear_result_path, false_arg);
2618  result_val->init_req(no_clear_result_path, intcon(1));
2619
2620  // drop through to next case
2621  set_control( _gvn.transform(new (C, 1) IfTrueNode(iff_arg)) );
2622
2623  // (d) Otherwise, go to the slow path.
2624  slow_region->add_req(control());
2625  set_control( _gvn.transform(slow_region) );
2626
2627  if (stopped()) {
2628    // There is no slow path.
2629    result_rgn->init_req(slow_result_path, top());
2630    result_val->init_req(slow_result_path, top());
2631  } else {
2632    // non-virtual because it is a private non-static
2633    CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_isInterrupted);
2634
2635    Node* slow_val = set_results_for_java_call(slow_call);
2636    // this->control() comes from set_results_for_java_call
2637
2638    // If we know that the result of the slow call will be true, tell the optimizer!
2639    if (known_current_thread)  slow_val = intcon(1);
2640
2641    Node* fast_io  = slow_call->in(TypeFunc::I_O);
2642    Node* fast_mem = slow_call->in(TypeFunc::Memory);
2643    // These two phis are pre-filled with copies of the fast IO and Memory
2644    Node* io_phi   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
2645    Node* mem_phi  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
2646
2647    result_rgn->init_req(slow_result_path, control());
2648    io_phi    ->init_req(slow_result_path, i_o());
2649    mem_phi   ->init_req(slow_result_path, reset_memory());
2650    result_val->init_req(slow_result_path, slow_val);
2651
2652    set_all_memory( _gvn.transform(mem_phi) );
2653    set_i_o(        _gvn.transform(io_phi) );
2654  }
2655
2656  push_result(result_rgn, result_val);
2657  C->set_has_split_ifs(true); // Has chance for split-if optimization
2658
2659  return true;
2660}
2661
2662//---------------------------load_mirror_from_klass----------------------------
2663// Given a klass oop, load its java mirror (a java.lang.Class oop).
2664Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
2665  Node* p = basic_plus_adr(klass, Klass::java_mirror_offset_in_bytes() + sizeof(oopDesc));
2666  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
2667}
2668
2669//-----------------------load_klass_from_mirror_common-------------------------
2670// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
2671// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
2672// and branch to the given path on the region.
2673// If never_see_null, take an uncommon trap on null, so we can optimistically
2674// compile for the non-null case.
2675// If the region is NULL, force never_see_null = true.
2676Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
2677                                                    bool never_see_null,
2678                                                    int nargs,
2679                                                    RegionNode* region,
2680                                                    int null_path,
2681                                                    int offset) {
2682  if (region == NULL)  never_see_null = true;
2683  Node* p = basic_plus_adr(mirror, offset);
2684  const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
2685  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) );
2686  _sp += nargs; // any deopt will start just before call to enclosing method
2687  Node* null_ctl = top();
2688  kls = null_check_oop(kls, &null_ctl, never_see_null);
2689  if (region != NULL) {
2690    // Set region->in(null_path) if the mirror is a primitive (e.g., int.class).
2691    region->init_req(null_path, null_ctl);
2692  } else {
2693    assert(null_ctl == top(), "no loose ends");
2694  }
2695  _sp -= nargs;
2696  return kls;
2697}
2698
2699//--------------------(inline_native_Class_query helpers)---------------------
2700// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER.
2701// Fall through if (mods & mask) == bits, take the guard otherwise.
2702Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
2703  // Branch around if the given klass has the given modifier bit set.
2704  // Like generate_guard, adds a new path onto the region.
2705  Node* modp = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
2706  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
2707  Node* mask = intcon(modifier_mask);
2708  Node* bits = intcon(modifier_bits);
2709  Node* mbit = _gvn.transform( new (C, 3) AndINode(mods, mask) );
2710  Node* cmp  = _gvn.transform( new (C, 3) CmpINode(mbit, bits) );
2711  Node* bol  = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
2712  return generate_fair_guard(bol, region);
2713}
2714Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
2715  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
2716}
2717
2718//-------------------------inline_native_Class_query-------------------
2719bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
2720  int nargs = 1+0;  // just the Class mirror, in most cases
2721  const Type* return_type = TypeInt::BOOL;
2722  Node* prim_return_value = top();  // what happens if it's a primitive class?
2723  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
2724  bool expect_prim = false;     // most of these guys expect to work on refs
2725
2726  enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
2727
2728  switch (id) {
2729  case vmIntrinsics::_isInstance:
2730    nargs = 1+1;  // the Class mirror, plus the object getting queried about
2731    // nothing is an instance of a primitive type
2732    prim_return_value = intcon(0);
2733    break;
2734  case vmIntrinsics::_getModifiers:
2735    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
2736    assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line");
2737    return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin);
2738    break;
2739  case vmIntrinsics::_isInterface:
2740    prim_return_value = intcon(0);
2741    break;
2742  case vmIntrinsics::_isArray:
2743    prim_return_value = intcon(0);
2744    expect_prim = true;  // cf. ObjectStreamClass.getClassSignature
2745    break;
2746  case vmIntrinsics::_isPrimitive:
2747    prim_return_value = intcon(1);
2748    expect_prim = true;  // obviously
2749    break;
2750  case vmIntrinsics::_getSuperclass:
2751    prim_return_value = null();
2752    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
2753    break;
2754  case vmIntrinsics::_getComponentType:
2755    prim_return_value = null();
2756    return_type = TypeInstPtr::MIRROR->cast_to_ptr_type(TypePtr::BotPTR);
2757    break;
2758  case vmIntrinsics::_getClassAccessFlags:
2759    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
2760    return_type = TypeInt::INT;  // not bool!  6297094
2761    break;
2762  default:
2763    ShouldNotReachHere();
2764  }
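      // For example, with the prim_return_value choices above, a primitive
      // mirror such as int.class answers:
      //   int.class.getModifiers()  => ABSTRACT | FINAL | PUBLIC
      //   int.class.getSuperclass() => null
      //   int.class.isPrimitive()   => true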
2765
2766  Node* mirror =                      argument(0);
2767  Node* obj    = (nargs <= 1)? top(): argument(1);
2768
2769  const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
2770  if (mirror_con == NULL)  return false;  // cannot happen?
2771
2772#ifndef PRODUCT
2773  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
2774    ciType* k = mirror_con->java_mirror_type();
2775    if (k) {
2776      tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
2777      k->print_name();
2778      tty->cr();
2779    }
2780  }
2781#endif
2782
2783  // Null-check the mirror, and the mirror's klass ptr (in case it is a primitive).
2784  RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT);
2785  record_for_igvn(region);
2786  PhiNode* phi = new (C, PATH_LIMIT) PhiNode(region, return_type);
2787
2788  // The mirror will never be null for Reflection.getClassAccessFlags; however,
2789  // it may be null for Class.isInstance or Class.getModifiers. Throw an NPE
2790  // if it is. See bug 4774291.
2791
2792  // For Reflection.getClassAccessFlags(), the null check occurs in
2793  // the wrong place; see inline_unsafe_access(), above, for a similar
2794  // situation.
2795  _sp += nargs;  // set original stack for use by uncommon_trap
2796  mirror = do_null_check(mirror, T_OBJECT);
2797  _sp -= nargs;
2798  // If mirror or obj is dead, only null-path is taken.
2799  if (stopped())  return true;
2800
2801  if (expect_prim)  never_see_null = false;  // expect nulls (meaning prims)
2802
2803  // Now load the mirror's klass metaobject, and null-check it.
2804  // Side-effects region with the control path if the klass is null.
2805  Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs,
2806                                     region, _prim_path);
2807  // If kls is null, we have a primitive mirror.
2808  phi->init_req(_prim_path, prim_return_value);
2809  if (stopped()) { push_result(region, phi); return true; }
2810
2811  Node* p;  // handy temp
2812  Node* null_ctl;
2813
2814  // Now that we have the non-null klass, we can perform the real query.
2815  // For constant classes, the query will constant-fold in LoadNode::Value.
2816  Node* query_value = top();
2817  switch (id) {
2818  case vmIntrinsics::_isInstance:
2819    // nothing is an instance of a primitive type
2820    query_value = gen_instanceof(obj, kls);
2821    break;
2822
2823  case vmIntrinsics::_getModifiers:
2824    p = basic_plus_adr(kls, Klass::modifier_flags_offset_in_bytes() + sizeof(oopDesc));
2825    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
2826    break;
2827
2828  case vmIntrinsics::_isInterface:
2829    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
2830    if (generate_interface_guard(kls, region) != NULL)
2831      // A guard was added.  If the guard is taken, it was an interface.
2832      phi->add_req(intcon(1));
2833    // If we fall through, it's a plain class.
2834    query_value = intcon(0);
2835    break;
2836
2837  case vmIntrinsics::_isArray:
2838    // (To verify this code sequence, check the asserts in JVM_IsArrayClass.)
2839    if (generate_array_guard(kls, region) != NULL)
2840      // A guard was added.  If the guard is taken, it was an array.
2841      phi->add_req(intcon(1));
2842    // If we fall through, it's a plain class.
2843    query_value = intcon(0);
2844    break;
2845
2846  case vmIntrinsics::_isPrimitive:
2847    query_value = intcon(0); // "normal" path produces false
2848    break;
2849
2850  case vmIntrinsics::_getSuperclass:
2851    // The rules here are somewhat unfortunate, but we can still do better
2852    // with random logic than with a JNI call.
2853    // Interfaces store null or Object as _super, but must report null.
2854    // Arrays store an intermediate super as _super, but must report Object.
2855    // Other types can report the actual _super.
2856    // (To verify this code sequence, check the asserts in JVM_IsInterface.)
2857    if (generate_interface_guard(kls, region) != NULL)
2858      // A guard was added.  If the guard is taken, it was an interface.
2859      phi->add_req(null());
2860    if (generate_array_guard(kls, region) != NULL)
2861      // A guard was added.  If the guard is taken, it was an array.
2862      phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
2863    // If we fall through, it's a plain class.  Get its _super.
2864    p = basic_plus_adr(kls, Klass::super_offset_in_bytes() + sizeof(oopDesc));
2865    kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) );
2866    null_ctl = top();
2867    kls = null_check_oop(kls, &null_ctl);
2868    if (null_ctl != top()) {
2869      // If the guard is taken, Object.superClass is null (both klass and mirror).
2870      region->add_req(null_ctl);
2871      phi   ->add_req(null());
2872    }
2873    if (!stopped()) {
2874      query_value = load_mirror_from_klass(kls);
2875    }
2876    break;
2877
2878  case vmIntrinsics::_getComponentType:
2879    if (generate_array_guard(kls, region) != NULL) {
2880      // Be sure to pin the oop load to the guard edge just created:
2881      Node* is_array_ctrl = region->in(region->req()-1);
2882      Node* cma = basic_plus_adr(kls, in_bytes(arrayKlass::component_mirror_offset()) + sizeof(oopDesc));
2883      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
2884      phi->add_req(cmo);
2885    }
2886    query_value = null();  // non-array case is null
2887    break;
2888
2889  case vmIntrinsics::_getClassAccessFlags:
2890    p = basic_plus_adr(kls, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc));
2891    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
2892    break;
2893
2894  default:
2895    ShouldNotReachHere();
2896  }
2897
2898  // Fall-through is the normal case of a query to a real class.
2899  phi->init_req(1, query_value);
2900  region->init_req(1, control());
2901
2902  push_result(region, phi);
2903  C->set_has_split_ifs(true); // Has chance for split-if optimization
2904
2905  return true;
2906}
2907
2908//--------------------------inline_native_subtype_check------------------------
2909// This intrinsic takes the JNI calls out of the heart of
2910// UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
2911bool LibraryCallKit::inline_native_subtype_check() {
2912  int nargs = 1+1;  // the Class mirror, plus the other class getting examined
2913
2914  // Pull both arguments off the stack.
2915  Node* args[2];                // two java.lang.Class mirrors: superc, subc
2916  args[0] = argument(0);
2917  args[1] = argument(1);
2918  Node* klasses[2];             // corresponding Klasses: superk, subk
2919  klasses[0] = klasses[1] = top();
2920
2921  enum {
2922    // A full decision tree on {superc is prim, subc is prim}:
2923    _prim_0_path = 1,           // {P,N} => false
2924                                // {P,P} & superc!=subc => false
2925    _prim_same_path,            // {P,P} & superc==subc => true
2926    _prim_1_path,               // {N,P} => false
2927    _ref_subtype_path,          // {N,N} & subtype check wins => true
2928    _both_ref_path,             // {N,N} & subtype check loses => false
2929    PATH_LIMIT
2930  };
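      // In Java terms, the decision tree above implements
      // superc.isAssignableFrom(subc); a sketch of the semantics:
      //
      //   if (superc.isPrimitive() || subc.isPrimitive())
      //     return superc == subc;        // covers the three {P,*}/{*,P} rows
      //   return subc is a subtype of superc;  // _ref_subtype_path vs _both_ref_path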
2931
2932  RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT);
2933  Node*       phi    = new (C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL);
2934  record_for_igvn(region);
2935
2936  const TypePtr* adr_type = TypeRawPtr::BOTTOM;   // memory type of loads
2937  const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL;
2938  int class_klass_offset = java_lang_Class::klass_offset_in_bytes();
2939
2940  // First null-check both mirrors and load each mirror's klass metaobject.
2941  int which_arg;
2942  for (which_arg = 0; which_arg <= 1; which_arg++) {
2943    Node* arg = args[which_arg];
2944    _sp += nargs;  // set original stack for use by uncommon_trap
2945    arg = do_null_check(arg, T_OBJECT);
2946    _sp -= nargs;
2947    if (stopped())  break;
2948    args[which_arg] = _gvn.transform(arg);
2949
2950    Node* p = basic_plus_adr(arg, class_klass_offset);
2951    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
2952    klasses[which_arg] = _gvn.transform(kls);
2953  }
2954
2955  // Having loaded both klasses, test each for null.
2956  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
2957  for (which_arg = 0; which_arg <= 1; which_arg++) {
2958    Node* kls = klasses[which_arg];
2959    Node* null_ctl = top();
2960    _sp += nargs;  // set original stack for use by uncommon_trap
2961    kls = null_check_oop(kls, &null_ctl, never_see_null);
2962    _sp -= nargs;
2963    int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
2964    region->init_req(prim_path, null_ctl);
2965    if (stopped())  break;
2966    klasses[which_arg] = kls;
2967  }
2968
2969  if (!stopped()) {
2970    // now we have two reference types, in klasses[0..1]
2971    Node* subk   = klasses[1];  // the argument to isAssignableFrom
2972    Node* superk = klasses[0];  // the receiver
2973    region->set_req(_both_ref_path, gen_subtype_check(subk, superk));
2974    // now we have a successful reference subtype check
2975    region->set_req(_ref_subtype_path, control());
2976  }
2977
2978  // If both operands are primitive (both klasses null), then
2979  // we must return true when they are identical primitives.
2980  // It is convenient to test this after the first null klass check.
2981  set_control(region->in(_prim_0_path)); // go back to first null check
2982  if (!stopped()) {
2983    // Since superc is primitive, make a guard for the superc==subc case.
2984    Node* cmp_eq = _gvn.transform( new (C, 3) CmpPNode(args[0], args[1]) );
2985    Node* bol_eq = _gvn.transform( new (C, 2) BoolNode(cmp_eq, BoolTest::eq) );
2986    generate_guard(bol_eq, region, PROB_FAIR);
2987    if (region->req() == PATH_LIMIT+1) {
2988      // A guard was added.  If the added guard is taken, superc==subc.
2989      region->swap_edges(PATH_LIMIT, _prim_same_path);
2990      region->del_req(PATH_LIMIT);
2991    }
2992    region->set_req(_prim_0_path, control()); // Not equal after all.
2993  }
2994
2995  // these are the only paths that produce 'true':
2996  phi->set_req(_prim_same_path,   intcon(1));
2997  phi->set_req(_ref_subtype_path, intcon(1));
2998
2999  // pull together the cases:
3000  assert(region->req() == PATH_LIMIT, "sane region");
3001  for (uint i = 1; i < region->req(); i++) {
3002    Node* ctl = region->in(i);
3003    if (ctl == NULL || ctl == top()) {
3004      region->set_req(i, top());
3005      phi   ->set_req(i, top());
3006    } else if (phi->in(i) == NULL) {
3007      phi->set_req(i, intcon(0)); // all other paths produce 'false'
3008    }
3009  }
3010
3011  set_control(_gvn.transform(region));
3012  push(_gvn.transform(phi));
3013
3014  return true;
3015}
3016
3017//---------------------generate_array_guard_common------------------------
3018Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
3019                                                  bool obj_array, bool not_array) {
3020  // If obj_array/not_array==false/false:
3021  // Branch around if the given klass is in fact an array (either obj or prim).
3022  // If obj_array/not_array==false/true:
3023  // Branch around if the given klass is not an array klass of any kind.
3024  // If obj_array/not_array==true/true:
3025  // Branch around if the kls is not an oop array (kls is int[], String, etc.)
3026  // If obj_array/not_array==true/false:
3027  // Branch around if the kls is an oop array (Object[] or subtype)
3028  //
3029  // Like generate_guard, adds a new path onto the region.
3030  jint  layout_con = 0;
3031  Node* layout_val = get_layout_helper(kls, layout_con);
3032  if (layout_val == NULL) {
3033    bool query = (obj_array
3034                  ? Klass::layout_helper_is_objArray(layout_con)
3035                  : Klass::layout_helper_is_javaArray(layout_con));
3036    if (query == not_array) {
3037      return NULL;                       // never a branch
3038    } else {                             // always a branch
3039      Node* always_branch = control();
3040      if (region != NULL)
3041        region->add_req(always_branch);
3042      set_control(top());
3043      return always_branch;
3044    }
3045  }
3046  // Now test the correct condition.
3047  jint  nval = (obj_array
3048                ? ((jint)Klass::_lh_array_tag_type_value
3049                   <<    Klass::_lh_array_tag_shift)
3050                : Klass::_lh_neutral_value);
3051  Node* cmp = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(nval)) );
3052  BoolTest::mask btest = BoolTest::lt;  // correct for testing is_[obj]array
3053  // invert the test if we are looking for a non-array
3054  if (not_array)  btest = BoolTest(btest).negate();
3055  Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, btest) );
3056  return generate_fair_guard(bol, region);
3057}
3058
3059
3060//-----------------------inline_native_newArray--------------------------
3061bool LibraryCallKit::inline_native_newArray() {
3062  int nargs = 2;
3063  Node* mirror    = argument(0);
3064  Node* count_val = argument(1);
3065
3066  _sp += nargs;  // set original stack for use by uncommon_trap
3067  mirror = do_null_check(mirror, T_OBJECT);
3068  _sp -= nargs;
3069  // If mirror or obj is dead, only null-path is taken.
3070  if (stopped())  return true;
3071
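      // A sketch of the transformation: the fast path turns the reflective
      //   Array.newInstance(componentType, length)
      // into a plain 'new T[length]' whenever the array klass is already
      // cached in the java.lang.Class; otherwise the slow path below falls
      // back to the Java call.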
3072  enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT };
3073  RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
3074  PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
3075                                                      TypeInstPtr::NOTNULL);
3076  PhiNode*    result_io  = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
3077  PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
3078                                                      TypePtr::BOTTOM);
3079
3080  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
3081  Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
3082                                                  nargs,
3083                                                  result_reg, _slow_path);
3084  Node* normal_ctl   = control();
3085  Node* no_array_ctl = result_reg->in(_slow_path);
3086
3087  // Generate code for the slow case.  We make a call to newArray().
3088  set_control(no_array_ctl);
3089  if (!stopped()) {
3090    // Either the input type is void.class, or else the
3091    // array klass has not yet been cached.  Either the
3092    // ensuing call will throw an exception, or else it
3093    // will cache the array klass for next time.
3094    PreserveJVMState pjvms(this);
3095    CallJavaNode* slow_call = generate_method_call_static(vmIntrinsics::_newArray);
3096    Node* slow_result = set_results_for_java_call(slow_call);
3097    // this->control() comes from set_results_for_java_call
3098    result_reg->set_req(_slow_path, control());
3099    result_val->set_req(_slow_path, slow_result);
3100    result_io ->set_req(_slow_path, i_o());
3101    result_mem->set_req(_slow_path, reset_memory());
3102  }
3103
3104  set_control(normal_ctl);
3105  if (!stopped()) {
3106    // Normal case:  The array type has been cached in the java.lang.Class.
3107    // The following call works fine even if the array type is polymorphic.
3108    // It could be a dynamic mix of int[], boolean[], Object[], etc.
3109    Node* obj = new_array(klass_node, count_val, nargs);
3110    result_reg->init_req(_normal_path, control());
3111    result_val->init_req(_normal_path, obj);
3112    result_io ->init_req(_normal_path, i_o());
3113    result_mem->init_req(_normal_path, reset_memory());
3114  }
3115
3116  // Return the combined state.
3117  set_i_o(        _gvn.transform(result_io)  );
3118  set_all_memory( _gvn.transform(result_mem) );
3119  push_result(result_reg, result_val);
3120  C->set_has_split_ifs(true); // Has chance for split-if optimization
3121
3122  return true;
3123}
3124
3125//----------------------inline_native_getLength--------------------------
3126bool LibraryCallKit::inline_native_getLength() {
3127  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3128
3129  int nargs = 1;
3130  Node* array = argument(0);
3131
3132  _sp += nargs;  // set original stack for use by uncommon_trap
3133  array = do_null_check(array, T_OBJECT);
3134  _sp -= nargs;
3135
3136  // If array is dead, only null-path is taken.
3137  if (stopped())  return true;
3138
3139  // Deoptimize if it is a non-array.
3140  Node* non_array = generate_non_array_guard(load_object_klass(array), NULL);
3141
3142  if (non_array != NULL) {
3143    PreserveJVMState pjvms(this);
3144    set_control(non_array);
3145    _sp += nargs;  // push the arguments back on the stack
3146    uncommon_trap(Deoptimization::Reason_intrinsic,
3147                  Deoptimization::Action_maybe_recompile);
3148  }
3149
3150  // If control is dead, only non-array-path is taken.
3151  if (stopped())  return true;
3152
3153  // This works fine even if the array type is polymorphic.
3154  // It could be a dynamic mix of int[], boolean[], Object[], etc.
3155  push( load_array_length(array) );
3156
3157  C->set_has_split_ifs(true); // Has chance for split-if optimization
3158
3159  return true;
3160}
3161
3162//------------------------inline_array_copyOf----------------------------
3163bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
3164  if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
3165
3166  // Restore the stack and pop off the arguments.
3167  int nargs = 3 + (is_copyOfRange? 1: 0);
3168  Node* original          = argument(0);
3169  Node* start             = is_copyOfRange? argument(1): intcon(0);
3170  Node* end               = is_copyOfRange? argument(2): argument(1);
3171  Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
3172
3173  _sp += nargs;  // set original stack for use by uncommon_trap
3174  array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
3175  original          = do_null_check(original, T_OBJECT);
3176  _sp -= nargs;
3177
3178  // Check if a null path was taken unconditionally.
3179  if (stopped())  return true;
3180
3181  Node* orig_length = load_array_length(original);
3182
3183  Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nargs,
3184                                            NULL, 0);
3185  _sp += nargs;  // set original stack for use by uncommon_trap
3186  klass_node = do_null_check(klass_node, T_OBJECT);
3187  _sp -= nargs;
3188
3189  RegionNode* bailout = new (C, 1) RegionNode(1);
3190  record_for_igvn(bailout);
3191
3192  // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
3193  // Bail out if that is so.
3194  Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
3195  if (not_objArray != NULL) {
3196    // Improve the klass node's type from the new optimistic assumption:
3197    ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
3198    const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
3199    Node* cast = new (C, 2) CastPPNode(klass_node, akls);
3200    cast->init_req(0, control());
3201    klass_node = _gvn.transform(cast);
3202  }
3203
3204  // Bail out if either start or end is negative.
3205  generate_negative_guard(start, bailout, &start);
3206  generate_negative_guard(end,   bailout, &end);
3207
3208  Node* length = end;
3209  if (_gvn.type(start) != TypeInt::ZERO) {
3210    length = _gvn.transform( new (C, 3) SubINode(end, start) );
3211  }
3212
3213  // Bail out if length is negative.
3214  // ...Not needed, since the new_array will throw the right exception.
3215  //generate_negative_guard(length, bailout, &length);
3216
3217  if (bailout->req() > 1) {
3218    PreserveJVMState pjvms(this);
3219    set_control( _gvn.transform(bailout) );
3220    _sp += nargs;  // push the arguments back on the stack
3221    uncommon_trap(Deoptimization::Reason_intrinsic,
3222                  Deoptimization::Action_maybe_recompile);
3223  }
3224
3225  if (!stopped()) {
3226    // How many elements will we copy from the original?
3227    // The answer is MinI(orig_length - start, length).
3228    Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
3229    Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
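        // Worked example: Arrays.copyOfRange(a, 2, 10) with a.length == 5
        // gives orig_tail == 3 and length == 8, so moved == 3; elements
        // a[2..4] are copied and the new array's remaining 5 slots keep
        // their default (zero/null) values.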
3230
3231    const bool raw_mem_only = true;
3232    Node* newcopy = new_array(klass_node, length, nargs, raw_mem_only);
3233
3234    // Generate a direct call to the right arraycopy function(s).
3235    // We know the copy is disjoint but we might not know if the
3236    // oop stores need checking.
3237    // Extreme case:  Arrays.copyOf((Integer[])x, 10, String[].class).
3238    // This will fail a store-check if x contains any non-nulls.
3239    bool disjoint_bases = true;
3240    bool length_never_negative = true;
3241    generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
3242                       original, start, newcopy, intcon(0), moved,
3243                       disjoint_bases, length_never_negative);
3244
3245    push(newcopy);
3246  }
3247
3248  C->set_has_split_ifs(true); // Has chance for split-if optimization
3249
3250  return true;
3251}
3252
3253
3254//----------------------generate_virtual_guard---------------------------
3255// Helper for hashCode and clone.  Peeks inside the vtable to avoid a call.
3256Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass,
3257                                             RegionNode* slow_region) {
3258  ciMethod* method = callee();
3259  int vtable_index = method->vtable_index();
3260  // Get the methodOop out of the appropriate vtable entry.
3261  int entry_offset  = (instanceKlass::vtable_start_offset() +
3262                     vtable_index*vtableEntry::size()) * wordSize +
3263                     vtableEntry::method_offset_in_bytes();
3264  Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
3265  Node* target_call = make_load(NULL, entry_addr, TypeInstPtr::NOTNULL, T_OBJECT);
3266
3267  // Compare the target method with the expected method (e.g., Object.hashCode).
3268  const TypeInstPtr* native_call_addr = TypeInstPtr::make(method);
3269
3270  Node* native_call = makecon(native_call_addr);
3271  Node* chk_native  = _gvn.transform( new(C, 3) CmpPNode(target_call, native_call) );
3272  Node* test_native = _gvn.transform( new(C, 2) BoolNode(chk_native, BoolTest::ne) );
3273
3274  return generate_slow_guard(test_native, slow_region);
3275}
3276
3277//-----------------------generate_method_call----------------------------
3278// Use generate_method_call to make a slow-call to the real
3279// method if the fast path fails.  An alternative would be to
3280// use a stub like OptoRuntime::slow_arraycopy_Java.
3281// This only works for expanding the current library call,
3282// not another intrinsic.  (E.g., don't use this for making an
3283// arraycopy call inside of the copyOf intrinsic.)
3284CallJavaNode*
3285LibraryCallKit::generate_method_call(vmIntrinsics::ID method_id, bool is_virtual, bool is_static) {
3286  // When compiling the intrinsic method itself, do not use this technique.
3287  guarantee(callee() != C->method(), "cannot make slow-call to self");
3288
3289  ciMethod* method = callee();
3290  // ensure the JVMS we have will be correct for this call
3291  guarantee(method_id == method->intrinsic_id(), "must match");
3292
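      // Dispatch selection sketch: is_static -> like invokestatic;
      // is_virtual -> like invokevirtual (through the vtable unless inline
      // caches suppress it); otherwise -> an "optimized virtual" call,
      // statically bound in the manner of invokespecial.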
3293  const TypeFunc* tf = TypeFunc::make(method);
3294  int tfdc = tf->domain()->cnt();
3295  CallJavaNode* slow_call;
3296  if (is_static) {
3297    assert(!is_virtual, "");
3298    slow_call = new(C, tfdc) CallStaticJavaNode(tf,
3299                                SharedRuntime::get_resolve_static_call_stub(),
3300                                method, bci());
3301  } else if (is_virtual) {
3302    null_check_receiver(method);
3303    int vtable_index = methodOopDesc::invalid_vtable_index;
3304    if (UseInlineCaches) {
3305      // Suppress the vtable call
3306    } else {
3307      // hashCode and clone are not miranda methods,
3308      // so the vtable index is fixed.
3309      // No need to use the linkResolver to get it.
3310       vtable_index = method->vtable_index();
3311    }
3312    slow_call = new(C, tfdc) CallDynamicJavaNode(tf,
3313                                SharedRuntime::get_resolve_virtual_call_stub(),
3314                                method, vtable_index, bci());
3315  } else {  // neither virtual nor static:  opt_virtual
3316    null_check_receiver(method);
3317    slow_call = new(C, tfdc) CallStaticJavaNode(tf,
3318                                SharedRuntime::get_resolve_opt_virtual_call_stub(),
3319                                method, bci());
3320    slow_call->set_optimized_virtual(true);
3321  }
3322  set_arguments_for_java_call(slow_call);
3323  set_edges_for_java_call(slow_call);
3324  return slow_call;
3325}
3326
3327
3328//------------------------------inline_native_hashcode--------------------
3329// Build special case code for calls to hashCode on an object.
3330bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) {
3331  assert(is_static == callee()->is_static(), "correct intrinsic selection");
3332  assert(!(is_virtual && is_static), "either virtual, special, or static");
3333
3334  enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT };
3335
3336  RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
3337  PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
3338                                                      TypeInt::INT);
3339  PhiNode*    result_io  = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
3340  PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
3341                                                      TypePtr::BOTTOM);
3342  Node* obj = NULL;
3343  if (!is_static) {
3344    // Check for hashing null object
3345    obj = null_check_receiver(callee());
3346    if (stopped())  return true;        // unconditionally null
3347    result_reg->init_req(_null_path, top());
3348    result_val->init_req(_null_path, top());
3349  } else {
3350    // Do a null check, and return zero if null.
3351    // System.identityHashCode(null) == 0
3352    obj = argument(0);
3353    Node* null_ctl = top();
3354    obj = null_check_oop(obj, &null_ctl);
3355    result_reg->init_req(_null_path, null_ctl);
3356    result_val->init_req(_null_path, _gvn.intcon(0));
3357  }
3358
3359  // Unconditionally null?  Then return right away.
3360  if (stopped()) {
3361    set_control( result_reg->in(_null_path) );
3362    if (!stopped())
3363      push(      result_val ->in(_null_path) );
3364    return true;
3365  }
3366
3367  // After null check, get the object's klass.
3368  Node* obj_klass = load_object_klass(obj);
3369
3370  // This call may be virtual (invokevirtual) or bound (invokespecial).
3371  // For each case we generate slightly different code.
3372
3373  // We only go to the fast case code if we pass a number of guards.  The
3374  // paths which do not pass are accumulated in the slow_region.
3375  RegionNode* slow_region = new (C, 1) RegionNode(1);
3376  record_for_igvn(slow_region);
3377
3378  // If this is a virtual call, we generate a funny guard.  We pull out
3379  // the vtable entry corresponding to hashCode() from the target object.
3380  // If the target method which we are calling happens to be the native
3381  // Object hashCode() method, we pass the guard.  We do not need this
3382  // guard for non-virtual calls -- the caller is known to be the native
3383  // Object hashCode().
3384  if (is_virtual) {
3385    generate_virtual_guard(obj_klass, slow_region);
3386  }
3387
3388  // Get the header out of the object, use LoadMarkNode when available
3389  Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3390  Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS);
3391  header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) );
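      // For reference, the unlocked mark word layout on a 32-bit VM is
      // (the 64-bit layout differs; see markOop.hpp):
      //   [ hash:25 | age:4 | biased_lock:1 | lock:2 ]
      // The guards below check that the lock bits read 'unlocked' and that
      // a hash has actually been assigned (hash != no_hash).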
3392
3393  // Test the header to see if it is unlocked.
3394  Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
3395  Node *lmasked_header = _gvn.transform( new (C, 3) AndXNode(header, lock_mask) );
3396  Node *unlocked_val   = _gvn.MakeConX(markOopDesc::unlocked_value);
3397  Node *chk_unlocked   = _gvn.transform( new (C, 3) CmpXNode( lmasked_header, unlocked_val));
3398  Node *test_unlocked  = _gvn.transform( new (C, 2) BoolNode( chk_unlocked, BoolTest::ne) );
3399
3400  generate_slow_guard(test_unlocked, slow_region);
3401
3402  // Get the hash value and check to see that it has been properly assigned.
3403  // We depend on hash_mask being at most 32 bits and avoid the use of
3404  // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
3405  // vm: see markOop.hpp.
3406  Node *hash_mask      = _gvn.intcon(markOopDesc::hash_mask);
3407  Node *hash_shift     = _gvn.intcon(markOopDesc::hash_shift);
3408  Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) );
3409  // This hack lets the hash bits live anywhere in the mark object now, as long
3410  // as the shift drops the relevant bits into the low 32 bits.  Note that
3411  // the Java spec says that hashCode is an int, so there's no point in capturing
3412  // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
3413  hshifted_header      = ConvX2I(hshifted_header);
3414  Node *hash_val       = _gvn.transform( new (C, 3) AndINode(hshifted_header, hash_mask) );
3415
3416  Node *no_hash_val    = _gvn.intcon(markOopDesc::no_hash);
3417  Node *chk_assigned   = _gvn.transform( new (C, 3) CmpINode( hash_val, no_hash_val));
3418  Node *test_assigned  = _gvn.transform( new (C, 2) BoolNode( chk_assigned, BoolTest::eq) );
3419
3420  generate_slow_guard(test_assigned, slow_region);
3421
3422  Node* init_mem = reset_memory();
3423  // fill in the rest of the null path:
3424  result_io ->init_req(_null_path, i_o());
3425  result_mem->init_req(_null_path, init_mem);
3426
3427  result_val->init_req(_fast_path, hash_val);
3428  result_reg->init_req(_fast_path, control());
3429  result_io ->init_req(_fast_path, i_o());
3430  result_mem->init_req(_fast_path, init_mem);
3431
3432  // Generate code for the slow case.  We make a call to hashCode().
3433  set_control(_gvn.transform(slow_region));
3434  if (!stopped()) {
3435    // No need for PreserveJVMState, because we're using up the present state.
3436    set_all_memory(init_mem);
3437    vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode;
3438    if (is_static)   hashCode_id = vmIntrinsics::_identityHashCode;
3439    CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
3440    Node* slow_result = set_results_for_java_call(slow_call);
3441    // this->control() comes from set_results_for_java_call
3442    result_reg->init_req(_slow_path, control());
3443    result_val->init_req(_slow_path, slow_result);
3444    result_io  ->set_req(_slow_path, i_o());
3445    result_mem ->set_req(_slow_path, reset_memory());
3446  }
3447
3448  // Return the combined state.
3449  set_i_o(        _gvn.transform(result_io)  );
3450  set_all_memory( _gvn.transform(result_mem) );
3451  push_result(result_reg, result_val);
3452
3453  return true;
3454}
3455
3456//---------------------------inline_native_getClass----------------------------
3457// Build special case code for calls to getClass on an object.
3458bool LibraryCallKit::inline_native_getClass() {
3459  Node* obj = null_check_receiver(callee());
3460  if (stopped())  return true;
3461  push( load_mirror_from_klass(load_object_klass(obj)) );
3462  return true;
3463}
3464
3465//-----------------inline_native_Reflection_getCallerClass---------------------
3466// In the presence of deep enough inlining, getCallerClass() becomes a no-op.
3467//
3468// NOTE that this code must perform the same logic as
3469// vframeStream::security_get_caller_frame in that it must skip
3470// Method.invoke() and auxiliary frames.
3471
3472
3473
3474
3475bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
3476  ciMethod*       method = callee();
3477
3478#ifndef PRODUCT
3479  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3480    tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
3481  }
3482#endif
3483
3484  debug_only(int saved_sp = _sp);
3485
3486  // Argument words:  (int depth)
3487  int nargs = 1;
3488
3489  _sp += nargs;
3490  Node* caller_depth_node = pop();
3491
3492  assert(saved_sp == _sp, "must have correct argument count");
3493
3494  // The depth value must be a constant in order for the runtime call
3495  // to be eliminated.
3496  const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int();
3497  if (caller_depth_type == NULL || !caller_depth_type->is_con()) {
3498#ifndef PRODUCT
3499    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3500      tty->print_cr("  Bailing out because caller depth was not a constant");
3501    }
3502#endif
3503    return false;
3504  }
3505  // Note that the JVM state at this point does not include the
3506  // getCallerClass() frame which we are trying to inline. The
3507  // semantics of getCallerClass(), however, are that the "first"
3508  // frame is the getCallerClass() frame, so we subtract one from the
3509  // requested depth before continuing. We don't inline requests of
3510  // getCallerClass(0).
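      // For example, getCallerClass(1) arrives here with caller_depth == 0
      // and (absent Method.invoke or auxiliary frames) answers the class of
      // the method that invoked getCallerClass; getCallerClass(2) skips one
      // more real frame in the loop below.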
3511  int caller_depth = caller_depth_type->get_con() - 1;
3512  if (caller_depth < 0) {
3513#ifndef PRODUCT
3514    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3515      tty->print_cr("  Bailing out because caller depth was %d", caller_depth);
3516    }
3517#endif
3518    return false;
3519  }
3520
3521  if (!jvms()->has_method()) {
3522#ifndef PRODUCT
3523    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3524      tty->print_cr("  Bailing out because intrinsic was inlined at top level");
3525    }
3526#endif
3527    return false;
3528  }
3529  int _depth = jvms()->depth();  // cache call chain depth
3530
3531  // Walk back up the JVM state to find the caller at the required
3532  // depth. NOTE that this code must perform the same logic as
3533  // vframeStream::security_get_caller_frame in that it must skip
3534  // Method.invoke() and auxiliary frames. Note also that depth is
3535  // 1-based (1 is the bottom of the inlining).
3536  int inlining_depth = _depth;
3537  JVMState* caller_jvms = NULL;
3538
3539  if (inlining_depth > 0) {
3540    caller_jvms = jvms();
3541    assert(caller_jvms == jvms()->of_depth(inlining_depth), "inlining_depth == our depth");
3542    do {
3543      // The following if-tests should be performed in this order
3544      if (is_method_invoke_or_aux_frame(caller_jvms)) {
3545        // Skip a Method.invoke() or auxiliary frame
3546      } else if (caller_depth > 0) {
3547        // Skip real frame
3548        --caller_depth;
3549      } else {
3550        // We're done: reached desired caller after skipping.
3551        break;
3552      }
3553      caller_jvms = caller_jvms->caller();
3554      --inlining_depth;
3555    } while (inlining_depth > 0);
3556  }
3557
3558  if (inlining_depth == 0) {
3559#ifndef PRODUCT
3560    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3561      tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
3562      tty->print_cr("  JVM state at this point:");
3563      for (int i = _depth; i >= 1; i--) {
3564        tty->print_cr("   %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
3565      }
3566    }
3567#endif
3568    return false; // Reached end of inlining
3569  }
3570
3571  // Acquire method holder as java.lang.Class
3572  ciInstanceKlass* caller_klass  = caller_jvms->method()->holder();
3573  ciInstance*      caller_mirror = caller_klass->java_mirror();
3574  // Push this as a constant
3575  push(makecon(TypeInstPtr::make(caller_mirror)));
3576#ifndef PRODUCT
3577  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
3578    tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
3579    tty->print_cr("  JVM state at this point:");
3580    for (int i = _depth; i >= 1; i--) {
3581      tty->print_cr("   %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
3582    }
3583  }
3584#endif
3585  return true;
3586}
3587
3588// Helper routine for above
3589bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
3590  // Is this the Method.invoke method itself?
3591  if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke)
3592    return true;
3593
3594  // Is this a helper, defined somewhere underneath MethodAccessorImpl?
3595  ciKlass* k = jvms->method()->holder();
3596  if (k->is_instance_klass()) {
3597    ciInstanceKlass* ik = k->as_instance_klass();
3598    for (; ik != NULL; ik = ik->super()) {
3599      if (ik->name() == ciSymbol::sun_reflect_MethodAccessorImpl() &&
3600          ik == env()->find_system_klass(ik->name())) {
3601        return true;
3602      }
3603    }
3604  }
3605
3606  return false;
3607}
3608
3609static int value_field_offset = -1;  // offset of the "value" field of AtomicLongCSImpl.  This is needed by
3610                                     // inline_native_AtomicLong_attemptUpdate(), which has no way of
3611                                     // computing it, since the CI interface provides no function to look up
3612                                     // a field by name.  It is computed and set by inline_native_AtomicLong_get().
3613                                     // Using a static variable here is safe even if we have multiple compilation
3614                                     // threads because the offset is constant.  At worst the same offset will be
3615                                     // computed and stored multiple times.
3616
3617bool LibraryCallKit::inline_native_AtomicLong_get() {
3618  // Restore the stack and pop off the argument
3619  _sp+=1;
3620  Node *obj = pop();
3621
3622  // get the offset of the "value" field. Since the CI interface
3623  // does not provide a way to look up a field by name, we scan the bytecodes
3624  // to get the field index.  We expect the first 2 instructions of the method
3625  // to be:
3626  //    0 aload_0
3627  //    1 getfield "value"
3628  ciMethod* method = callee();
3629  if (value_field_offset == -1)
3630  {
3631    ciField* value_field;
3632    ciBytecodeStream iter(method);
3633    Bytecodes::Code bc = iter.next();
3634
3635    if ((bc != Bytecodes::_aload_0) &&
3636              ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
3637      return false;
3638    bc = iter.next();
3639    if (bc != Bytecodes::_getfield)
3640      return false;
3641    bool ignore;
3642    value_field = iter.get_field(ignore);
3643    value_field_offset = value_field->offset_in_bytes();
3644  }
3645
3646  // Null check without removing any arguments.
3647  _sp++;
3648  obj = do_null_check(obj, T_OBJECT);
3649  _sp--;
3650  // Bail out if the receiver was null.
3651  if (stopped()) return true;
3652
3653  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
3654  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
3655  int alias_idx = C->get_alias_index(adr_type);
3656
3657  Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));
3658
3659  push_pair(result);
3660
3661  return true;
3662}
3663
3664bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
3665  // Restore the stack and pop off the arguments
3666  _sp+=5;
3667  Node *newVal = pop_pair();
3668  Node *oldVal = pop_pair();
3669  Node *obj = pop();
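      // The Java-level contract being compiled, as a rough sketch: an atomic
      // compare-and-swap on the AtomicLongCSImpl.value field, i.e.
      //   boolean attemptUpdate(long oldVal, long newVal) {
      //     return CAS(&this.value, oldVal, newVal);  // pseudocode CAS
      //   }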
3670
3671  // we need the offset of the "value" field which was computed when
3672  // inlining the get() method.  Give up if we don't have it.
3673  if (value_field_offset == -1)
3674    return false;
3675
3676  // Null check without removing any arguments.
3677  _sp+=5;
3678  obj = do_null_check(obj, T_OBJECT);
3679  _sp-=5;
3680  // Bail out if the receiver was null.
3681  if (stopped()) return true;
3682
3683  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
3684  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
3685  int alias_idx = C->get_alias_index(adr_type);
3686
3687  Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
3688  Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
3689  set_memory(store_proj, alias_idx);
3690  Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );
3691
3692  Node *result;
3693  // A CMove node is not used here, so that any check code following the
3694  // attemptUpdate() call can still be folded.  Loop optimizations may later
3695  // transform this code into a CMove node.
3696  {
3697    RegionNode *r = new (C, 3) RegionNode(3);
3698    result = new (C, 3) PhiNode(r, TypeInt::BOOL);
3699
3700    Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
3701    Node *iftrue = opt_iff(r, iff);
3702    r->init_req(1, iftrue);
3703    result->init_req(1, intcon(1));
3704    result->init_req(2, intcon(0));
3705
3706    set_control(_gvn.transform(r));
3707    record_for_igvn(r);
3708
3709    C->set_has_split_ifs(true); // Has chance for split-if optimization
3710  }
3711
3712  push(_gvn.transform(result));
3713  return true;
3714}
3715
3716bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
3717  // restore the arguments
3718  _sp += arg_size();
3719
3720  switch (id) {
3721  case vmIntrinsics::_floatToRawIntBits:
3722    push(_gvn.transform( new (C, 2) MoveF2INode(pop())));
3723    break;
3724
3725  case vmIntrinsics::_intBitsToFloat:
3726    push(_gvn.transform( new (C, 2) MoveI2FNode(pop())));
3727    break;
3728
3729  case vmIntrinsics::_doubleToRawLongBits:
3730    push_pair(_gvn.transform( new (C, 2) MoveD2LNode(pop_pair())));
3731    break;
3732
3733  case vmIntrinsics::_longBitsToDouble:
3734    push_pair(_gvn.transform( new (C, 2) MoveL2DNode(pop_pair())));
3735    break;
3736
3737  case vmIntrinsics::_doubleToLongBits: {
3738    Node* value = pop_pair();
3739
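        // What the graph below computes, in Java terms (a sketch):
        //   long doubleToLongBits(double d) {
        //     return (d != d) ? 0x7ff8000000000000L      // collapse any NaN
        //                     : doubleToRawLongBits(d);  // plain bit move
        //   }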
3740    // two paths (plus control) merge in a wood
3741    RegionNode *r = new (C, 3) RegionNode(3);
3742    Node *phi = new (C, 3) PhiNode(r, TypeLong::LONG);
3743
3744    Node *cmpisnan = _gvn.transform( new (C, 3) CmpDNode(value, value));
3745    // Build the boolean node
3746    Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) );
3747
3748    // Branch either way.
3749    // NaN case is less traveled, which makes all the difference.
3750    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3751    Node *opt_isnan = _gvn.transform(ifisnan);
3752    assert( opt_isnan->is_If(), "Expect an IfNode");
3753    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
3754    Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) );
3755
3756    set_control(iftrue);
3757
3758    static const jlong nan_bits = CONST64(0x7ff8000000000000);
3759    Node *slow_result = longcon(nan_bits); // return NaN
3760    phi->init_req(1, _gvn.transform( slow_result ));
3761    r->init_req(1, iftrue);
3762
3763    // Else fall through
3764    Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) );
3765    set_control(iffalse);
3766
3767    phi->init_req(2, _gvn.transform( new (C, 2) MoveD2LNode(value)));
3768    r->init_req(2, iffalse);
3769
3770    // Post merge
3771    set_control(_gvn.transform(r));
3772    record_for_igvn(r);
3773
3774    Node* result = _gvn.transform(phi);
3775    assert(result->bottom_type()->isa_long(), "must be");
3776    push_pair(result);
3777
3778    C->set_has_split_ifs(true); // Has chance for split-if optimization
3779
3780    break;
3781  }
3782
3783  case vmIntrinsics::_floatToIntBits: {
3784    Node* value = pop();
3785
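        // The analogous sketch for the float case:
        //   int floatToIntBits(float f) {
        //     return (f != f) ? 0x7fc00000             // collapse any NaN
        //                     : floatToRawIntBits(f);  // plain bit move
        //   }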
3786    // two paths (plus control) merge in a wood
3787    RegionNode *r = new (C, 3) RegionNode(3);
3788    Node *phi = new (C, 3) PhiNode(r, TypeInt::INT);
3789
3790    Node *cmpisnan = _gvn.transform( new (C, 3) CmpFNode(value, value));
3791    // Build the boolean node
3792    Node *bolisnan = _gvn.transform( new (C, 2) BoolNode( cmpisnan, BoolTest::ne ) );
3793
3794    // Branch either way.
3795    // NaN case is less traveled, which makes all the difference.
3796    IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3797    Node *opt_isnan = _gvn.transform(ifisnan);
3798    assert( opt_isnan->is_If(), "Expect an IfNode");
3799    IfNode *opt_ifisnan = (IfNode*)opt_isnan;
3800    Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(opt_ifisnan) );
3801
3802    set_control(iftrue);
3803
3804    static const jint nan_bits = 0x7fc00000;
3805    Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN
3806    phi->init_req(1, _gvn.transform( slow_result ));
3807    r->init_req(1, iftrue);
3808
3809    // Else fall through
3810    Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(opt_ifisnan) );
3811    set_control(iffalse);
3812
3813    phi->init_req(2, _gvn.transform( new (C, 2) MoveF2INode(value)));
3814    r->init_req(2, iffalse);
3815
3816    // Post merge
3817    set_control(_gvn.transform(r));
3818    record_for_igvn(r);
3819
3820    Node* result = _gvn.transform(phi);
3821    assert(result->bottom_type()->isa_int(), "must be");
3822    push(result);
3823
3824    C->set_has_split_ifs(true); // Has chance for split-if optimization
3825
3826    break;
3827  }
3828
3829  default:
3830    ShouldNotReachHere();
3831  }
3832
3833  return true;
3834}
3835
3836#ifdef _LP64
3837#define XTOP ,top() /*additional argument*/
3838#else  //_LP64
3839#define XTOP        /*no additional argument*/
3840#endif //_LP64
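    // Note: under _LP64, X-sized (pointer-width) values occupy two argument
    // slots, so XTOP supplies top() as the unused second half.  For example,
    //   make_runtime_call(..., src, dst, size XTOP)
    // passes (src, dst, size, top()) on 64-bit VMs and just (src, dst, size)
    // on 32-bit VMs.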
3841
3842//----------------------inline_unsafe_copyMemory-------------------------
3843bool LibraryCallKit::inline_unsafe_copyMemory() {
3844  if (callee()->is_static())  return false;  // caller must have the capability!
3845  int nargs = 1 + 5 + 3;  // receiver + 5 args (src: ptr,off, dst: ptr,off, size) + 3 long halves
3846  assert(signature()->size() == nargs-1, "copy has 5 arguments");
3847  null_check_receiver(callee());  // check then ignore argument(0)
3848  if (stopped())  return true;
3849
3850  C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
3851
3852  Node* src_ptr = argument(1);
3853  Node* src_off = ConvL2X(argument(2));
3854  assert(argument(3)->is_top(), "2nd half of long");
3855  Node* dst_ptr = argument(4);
3856  Node* dst_off = ConvL2X(argument(5));
3857  assert(argument(6)->is_top(), "2nd half of long");
3858  Node* size    = ConvL2X(argument(7));
3859  assert(argument(8)->is_top(), "2nd half of long");
3860
3861  assert(Unsafe_field_offset_to_byte_offset(11) == 11,
3862         "fieldOffset must be byte-scaled");
3863
3864  Node* src = make_unsafe_address(src_ptr, src_off);
3865  Node* dst = make_unsafe_address(dst_ptr, dst_off);
3866
3867  // Conservatively insert a memory barrier on all memory slices.
3868  // Do not let writes of the copy source or destination float below the copy.
3869  insert_mem_bar(Op_MemBarCPUOrder);
3870
3871  // Call it.  Note that the length argument is not scaled.
3872  make_runtime_call(RC_LEAF|RC_NO_FP,
3873                    OptoRuntime::fast_arraycopy_Type(),
3874                    StubRoutines::unsafe_arraycopy(),
3875                    "unsafe_arraycopy",
3876                    TypeRawPtr::BOTTOM,
3877                    src, dst, size XTOP);
3878
3879  // Do not let reads of the copy destination float above the copy.
3880  insert_mem_bar(Op_MemBarCPUOrder);
3881
3882  return true;
3883}
3884
3885//------------------------copy_to_clone-----------------------------------
3886// Helper function for inline_native_clone.
3887void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
3888  assert(obj_size != NULL, "");
3889  Node* raw_obj = alloc_obj->in(1);
3890  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
3891
3892  if (ReduceBulkZeroing) {
3893    // We will be completely responsible for initializing this object -
3894    // mark Initialize node as complete.
3895    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
3896    // The object was just allocated - there should be no stores!
3897    guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
3898  }
3899
3900  // Cast to Object for arraycopy.
3901  // We can't use the original CheckCastPP since it should be moved
3902  // after the arraycopy to prevent stores flowing above it.
3903  Node* new_obj = new(C, 2) CheckCastPPNode(alloc_obj->in(0), raw_obj,
3904                                            TypeInstPtr::NOTNULL);
3905  new_obj = _gvn.transform(new_obj);
3906  // Substitute in the locally valid dest_oop.
3907  replace_in_map(alloc_obj, new_obj);
3908
3909  // Copy the fastest available way.
3910  // TODO: generate field copies for small objects instead.
3911  Node* src  = obj;
3912  Node* dest = new_obj;
3913  Node* size = _gvn.transform(obj_size);
3914
3915  // Exclude the header but include the array length to copy by 8-byte words.
3916  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
3917  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
3918                            instanceOopDesc::base_offset_in_bytes();
3919  // base_off:
3920  // 8  - 32-bit VM
3921  // 12 - 64-bit VM, compressed oops
3922  // 16 - 64-bit VM, normal oops
3923  if (base_off % BytesPerLong != 0) {
3924    assert(UseCompressedOops, "");
3925    if (is_array) {
3926      // Exclude the length field to copy by 8-byte words.
3927      base_off += sizeof(int);
3928    } else {
3929      // Include the klass field to copy by 8-byte words.
3930      base_off = instanceOopDesc::klass_offset_in_bytes();
3931    }
3932    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
3933  }
3934  src  = basic_plus_adr(src,  base_off);
3935  dest = basic_plus_adr(dest, base_off);
3936
3937  // Compute the length also, if needed:
3938  Node* countx = size;
3939  countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
3940  countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
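  // Worked example (illustrative numbers): on a 64-bit VM with normal
  // oops, an instance with obj_size == 32 bytes has base_off == 16 per
  // the table above, so countx == (32 - 16) >> LogBytesPerLong == 2
  // long words are copied.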
3941
3942  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
3943  bool disjoint_bases = true;
3944  generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
3945                               src, NULL, dest, NULL, countx);
3946
3947  // If necessary, emit some card marks afterwards.  (Non-arrays only.)
3948  if (card_mark) {
3949    assert(!is_array, "");
3950    // Put in store barrier for any and all oops we are sticking
3951    // into this object.  (We could avoid this if we could prove
3952    // that the object type contains no oop fields at all.)
3953    Node* no_particular_value = NULL;
3954    Node* no_particular_field = NULL;
3955    int raw_adr_idx = Compile::AliasIdxRaw;
3956    post_barrier(control(),
3957                 memory(raw_adr_type),
3958                 new_obj,
3959                 no_particular_field,
3960                 raw_adr_idx,
3961                 no_particular_value,
3962                 T_OBJECT,
3963                 false);
3964  }
3965
3966  // Move the original CheckCastPP after arraycopy.
3967  _gvn.hash_delete(alloc_obj);
3968  alloc_obj->set_req(0, control());
3969  // Replace raw memory edge with new CheckCastPP to have a live oop
3970  // at safepoints instead of raw value.
3971  assert(new_obj->is_CheckCastPP() && new_obj->in(1) == alloc_obj->in(1), "sanity");
3972  alloc_obj->set_req(1, new_obj);    // cast to the original type
3973  _gvn.hash_find_insert(alloc_obj);  // put back into GVN table
3974  // Restore in the locally valid dest_oop.
3975  replace_in_map(new_obj, alloc_obj);
3976}
3977
3978//------------------------inline_native_clone----------------------------
3979// Here are the simple edge cases:
3980//  null receiver => normal trap
3981//  virtual and clone was overridden => slow path to out-of-line clone
3982//  not cloneable or finalizer => slow path to out-of-line Object.clone
3983//
3984// The general case has two steps, allocation and copying.
3985// Allocation has two cases, and uses GraphKit::new_instance or new_array.
3986//
3987// Copying also has two cases, oop arrays and everything else.
3988// Oop arrays use arrayof_oop_arraycopy (same as System.arraycopy).
3989// Everything else uses the tight inline loop supplied by CopyArrayNode.
3990//
3991// These steps fold up nicely if and when the cloned object's klass
3992// can be sharply typed as an object array, a type array, or an instance.
3993//
3994bool LibraryCallKit::inline_native_clone(bool is_virtual) {
3995  int nargs = 1;
3996  Node* obj = null_check_receiver(callee());
3997  if (stopped())  return true;
3998  Node* obj_klass = load_object_klass(obj);
3999  const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
4000  const TypeOopPtr*   toop   = ((tklass != NULL)
4001                                ? tklass->as_instance_type()
4002                                : TypeInstPtr::NOTNULL);
4003
4004  // Conservatively insert a memory barrier on all memory slices.
4005  // Do not let writes into the original float below the clone.
4006  insert_mem_bar(Op_MemBarCPUOrder);
4007
4008  // paths into result_reg:
4009  enum {
4010    _slow_path = 1,     // out-of-line call to clone method (virtual or not)
4011    _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
4012    _array_path,        // plain array allocation, plus arrayof_long_arraycopy
4013    _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
4014    PATH_LIMIT
4015  };
4016  RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
4017  PhiNode*    result_val = new(C, PATH_LIMIT) PhiNode(result_reg,
4018                                                      TypeInstPtr::NOTNULL);
4019  PhiNode*    result_i_o = new(C, PATH_LIMIT) PhiNode(result_reg, Type::ABIO);
4020  PhiNode*    result_mem = new(C, PATH_LIMIT) PhiNode(result_reg, Type::MEMORY,
4021                                                      TypePtr::BOTTOM);
4022  record_for_igvn(result_reg);
4023
4024  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
4025  int raw_adr_idx = Compile::AliasIdxRaw;
4026  const bool raw_mem_only = true;
4027
4028  Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
4029  if (array_ctl != NULL) {
4030    // It's an array.
4031    PreserveJVMState pjvms(this);
4032    set_control(array_ctl);
4033    Node* obj_length = load_array_length(obj);
4034    Node* obj_size = NULL;
4035    Node* alloc_obj = new_array(obj_klass, obj_length, nargs,
4036                                raw_mem_only, &obj_size);
4037
4038    if (!use_ReduceInitialCardMarks()) {
4039      // If it is an oop array, it requires very special treatment,
4040      // because card marking is required on each card of the array.
4041      Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
4042      if (is_obja != NULL) {
4043        PreserveJVMState pjvms2(this);
4044        set_control(is_obja);
4045        // Generate a direct call to the right arraycopy function(s).
4046        bool disjoint_bases = true;
4047        bool length_never_negative = true;
4048        generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
4049                           obj, intcon(0), alloc_obj, intcon(0),
4050                           obj_length,
4051                           disjoint_bases, length_never_negative);
4052        result_reg->init_req(_objArray_path, control());
4053        result_val->init_req(_objArray_path, alloc_obj);
4054        result_i_o ->set_req(_objArray_path, i_o());
4055        result_mem ->set_req(_objArray_path, reset_memory());
4056      }
4057    }
4058    // We can dispense with card marks if we know the allocation
4059    // comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
4060    // causes the non-eden paths to simulate a fresh allocation,
4061    // insofar as no further card marks are required to initialize
4062    // the object.
4063
4064    // Otherwise, there are no card marks to worry about.
4065
4066    if (!stopped()) {
4067      copy_to_clone(obj, alloc_obj, obj_size, true, false);
4068
4069      // Present the results of the copy.
4070      result_reg->init_req(_array_path, control());
4071      result_val->init_req(_array_path, alloc_obj);
4072      result_i_o ->set_req(_array_path, i_o());
4073      result_mem ->set_req(_array_path, reset_memory());
4074    }
4075  }
4076
4077  // We only go to the instance fast case code if we pass a number of guards.
4078  // The paths which do not pass are accumulated in the slow_region.
4079  RegionNode* slow_region = new (C, 1) RegionNode(1);
4080  record_for_igvn(slow_region);
4081  if (!stopped()) {
4082    // It's an instance (we did array above).  Make the slow-path tests.
4083    // If this is a virtual call, we generate a funny guard.  We grab
4084    // the vtable entry corresponding to clone() from the target object.
4085    // If the target method which we are calling happens to be the
4086    // Object clone() method, we pass the guard.  We do not need this
4087    // guard for non-virtual calls; the caller is known to be the native
4088    // Object clone().
4089    if (is_virtual) {
4090      generate_virtual_guard(obj_klass, slow_region);
4091    }
4092
4093    // The object must be cloneable and must not have a finalizer.
4094    // Both of these conditions may be checked in a single test.
4095    // We could optimize the cloneable test further, but we don't care.
4096    generate_access_flags_guard(obj_klass,
4097                                // Test both conditions:
4098                                JVM_ACC_IS_CLONEABLE | JVM_ACC_HAS_FINALIZER,
4099                                // Must be cloneable but not finalizer:
4100                                JVM_ACC_IS_CLONEABLE,
4101                                slow_region);
4102  }
4103
4104  if (!stopped()) {
4105    // It's an instance, and it passed the slow-path tests.
4106    PreserveJVMState pjvms(this);
4107    Node* obj_size = NULL;
4108    Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
4109
4110    copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
4111
4112    // Present the results of the instance path.
4113    result_reg->init_req(_instance_path, control());
4114    result_val->init_req(_instance_path, alloc_obj);
4115    result_i_o ->set_req(_instance_path, i_o());
4116    result_mem ->set_req(_instance_path, reset_memory());
4117  }
4118
4119  // Generate code for the slow case.  We make a call to clone().
4120  set_control(_gvn.transform(slow_region));
4121  if (!stopped()) {
4122    PreserveJVMState pjvms(this);
4123    CallJavaNode* slow_call = generate_method_call(vmIntrinsics::_clone, is_virtual);
4124    Node* slow_result = set_results_for_java_call(slow_call);
4125    // this->control() comes from set_results_for_java_call
4126    result_reg->init_req(_slow_path, control());
4127    result_val->init_req(_slow_path, slow_result);
4128    result_i_o ->set_req(_slow_path, i_o());
4129    result_mem ->set_req(_slow_path, reset_memory());
4130  }
4131
4132  // Return the combined state.
4133  set_control(    _gvn.transform(result_reg) );
4134  set_i_o(        _gvn.transform(result_i_o) );
4135  set_all_memory( _gvn.transform(result_mem) );
4136
4137  push(_gvn.transform(result_val));
4138
4139  return true;
4140}
4141
4142
4143// constants for computing the copy function
4144enum {
4145  COPYFUNC_UNALIGNED = 0,
4146  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
4147  COPYFUNC_CONJOINT = 0,
4148  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
4149};
4150
4151// Note:  The condition "disjoint" also applies to overlapping copies
4152// where a descending copy is permitted (i.e., dest_offset <= src_offset).
4153static address
4154select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name) {
4155  int selector =
4156    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
4157    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
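  // For example, aligned == true and disjoint == true gives
  // selector == COPYFUNC_ALIGNED + COPYFUNC_DISJOINT == 3, which selects
  // an arrayof_<type>_disjoint_arraycopy stub below.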
4158
4159#define RETURN_STUB(xxx_arraycopy) { \
4160  name = #xxx_arraycopy; \
4161  return StubRoutines::xxx_arraycopy(); }
4162
4163  switch (t) {
4164  case T_BYTE:
4165  case T_BOOLEAN:
4166    switch (selector) {
4167    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
4168    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
4169    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
4170    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
4171    }
4172  case T_CHAR:
4173  case T_SHORT:
4174    switch (selector) {
4175    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
4176    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
4177    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
4178    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
4179    }
4180  case T_INT:
4181  case T_FLOAT:
4182    switch (selector) {
4183    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
4184    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
4185    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
4186    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
4187    }
4188  case T_DOUBLE:
4189  case T_LONG:
4190    switch (selector) {
4191    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
4192    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
4193    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
4194    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
4195    }
4196  case T_ARRAY:
4197  case T_OBJECT:
4198    switch (selector) {
4199    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(oop_arraycopy);
4200    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_oop_arraycopy);
4201    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(oop_disjoint_arraycopy);
4202    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_oop_disjoint_arraycopy);
4203    }
4204  default:
4205    ShouldNotReachHere();
4206    return NULL;
4207  }
4208
4209#undef RETURN_STUB
4210}
4211
4212//------------------------------basictype2arraycopy----------------------------
4213address LibraryCallKit::basictype2arraycopy(BasicType t,
4214                                            Node* src_offset,
4215                                            Node* dest_offset,
4216                                            bool disjoint_bases,
4217                                            const char* &name) {
4218  const TypeInt* src_offset_inttype  = gvn().find_int_type(src_offset);
4219  const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);
4220
4221  bool aligned = false;
4222  bool disjoint = disjoint_bases;
4223
4224  // if the offsets are the same, we can treat the memory regions as
4225  // disjoint, because either the memory regions are in different arrays,
4226  // or they are identical (which we can treat as disjoint).  We can also
4227  // treat a copy with a destination index less than the source index
4228  // as disjoint, since a low->high copy will work correctly in this case.
4229  if (src_offset_inttype != NULL && src_offset_inttype->is_con() &&
4230      dest_offset_inttype != NULL && dest_offset_inttype->is_con()) {
4231    // both indices are constants
4232    int s_offs = src_offset_inttype->get_con();
4233    int d_offs = dest_offset_inttype->get_con();
4234    int element_size = type2aelembytes(t);
4235    aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
4236              ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
4237    if (s_offs >= d_offs)  disjoint = true;
4238  } else if (src_offset == dest_offset && src_offset != NULL) {
4239    // This can occur if the offsets are identical non-constants.
4240    disjoint = true;
4241  }
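  // Worked example (illustrative): copying an int[] with constant offsets
  // s_offs == 2 and d_offs == 0 yields disjoint == true (s_offs >= d_offs),
  // and aligned == true only if both base_offset_in_bytes(T_INT) + 8 and
  // base_offset_in_bytes(T_INT) + 0 are multiples of HeapWordSize.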
4242
4243  return select_arraycopy_function(t, aligned, disjoint, name);
4244}
4245
4246
4247//------------------------------inline_arraycopy-----------------------
4248bool LibraryCallKit::inline_arraycopy() {
4249  // Restore the stack and pop off the arguments.
4250  int nargs = 5;  // 2 oops, 3 ints, no size_t or long
4251  assert(callee()->signature()->size() == nargs, "copy has 5 arguments");
4252
4253  Node *src         = argument(0);
4254  Node *src_offset  = argument(1);
4255  Node *dest        = argument(2);
4256  Node *dest_offset = argument(3);
4257  Node *length      = argument(4);
4258
4259  // Compile time checks.  If any of these checks cannot be verified at compile time,
4260  // we do not make a fast path for this call.  Instead, we let the call remain as it
4261  // is.  The checks we choose to mandate at compile time are:
4262  //
4263  // (1) src and dest are arrays.
4264  const Type* src_type = src->Value(&_gvn);
4265  const Type* dest_type = dest->Value(&_gvn);
4266  const TypeAryPtr* top_src = src_type->isa_aryptr();
4267  const TypeAryPtr* top_dest = dest_type->isa_aryptr();
4268  if (top_src  == NULL || top_src->klass()  == NULL ||
4269      top_dest == NULL || top_dest->klass() == NULL) {
4270    // Conservatively insert a memory barrier on all memory slices.
4271    // Do not let writes into the source float below the arraycopy.
4272    insert_mem_bar(Op_MemBarCPUOrder);
4273
4274    // Call StubRoutines::generic_arraycopy stub.
4275    generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
4276                       src, src_offset, dest, dest_offset, length);
4277
4278    // Do not let reads from the destination float above the arraycopy.
4279    // Since we cannot type the arrays, we don't know which slices
4280    // might be affected.  We could restrict this barrier only to those
4281    // memory slices which pertain to array elements--but don't bother.
4282    if (!InsertMemBarAfterArraycopy)
4283      // (If InsertMemBarAfterArraycopy, there is already one in place.)
4284      insert_mem_bar(Op_MemBarCPUOrder);
4285    return true;
4286  }
4287
4288  // (2) src and dest arrays must have elements of the same BasicType
4289  // Figure out the size and type of the elements we will be copying.
4290  BasicType src_elem  =  top_src->klass()->as_array_klass()->element_type()->basic_type();
4291  BasicType dest_elem = top_dest->klass()->as_array_klass()->element_type()->basic_type();
4292  if (src_elem  == T_ARRAY)  src_elem  = T_OBJECT;
4293  if (dest_elem == T_ARRAY)  dest_elem = T_OBJECT;
4294
4295  if (src_elem != dest_elem || dest_elem == T_VOID) {
4296    // The component types are not the same or are not recognized.  Punt.
4297    // (But, avoid the native method wrapper to JVM_ArrayCopy.)
4298    generate_slow_arraycopy(TypePtr::BOTTOM,
4299                            src, src_offset, dest, dest_offset, length);
4300    return true;
4301  }
4302
4303  //---------------------------------------------------------------------------
4304  // We will make a fast path for this call to arraycopy.
4305
4306  // We have the following tests left to perform:
4307  //
4308  // (3) src and dest must not be null.
4309  // (4) src_offset must not be negative.
4310  // (5) dest_offset must not be negative.
4311  // (6) length must not be negative.
4312  // (7) src_offset + length must not exceed length of src.
4313  // (8) dest_offset + length must not exceed length of dest.
4314  // (9) each element of an oop array must be assignable
4315
4316  RegionNode* slow_region = new (C, 1) RegionNode(1);
4317  record_for_igvn(slow_region);
4318
4319  // (3) operands must not be null
4320  // We currently perform our null checks with the do_null_check routine.
4321  // This means that the null exceptions will be reported in the caller
4322  // rather than (correctly) reported inside the native arraycopy call.
4323  // This should be corrected, given time.  We do our null check with the
4324  // stack pointer restored.
4325  _sp += nargs;
4326  src  = do_null_check(src,  T_ARRAY);
4327  dest = do_null_check(dest, T_ARRAY);
4328  _sp -= nargs;
4329
4330  // (4) src_offset must not be negative.
4331  generate_negative_guard(src_offset, slow_region);
4332
4333  // (5) dest_offset must not be negative.
4334  generate_negative_guard(dest_offset, slow_region);
4335
4336  // (6) length must not be negative (moved to generate_arraycopy()).
4337  // generate_negative_guard(length, slow_region);
4338
4339  // (7) src_offset + length must not exceed length of src.
4340  generate_limit_guard(src_offset, length,
4341                       load_array_length(src),
4342                       slow_region);
4343
4344  // (8) dest_offset + length must not exceed length of dest.
4345  generate_limit_guard(dest_offset, length,
4346                       load_array_length(dest),
4347                       slow_region);
4348
4349  // (9) each element of an oop array must be assignable
4350  // The generate_arraycopy subroutine checks this.
4351
4352  // This is where the memory effects are placed:
4353  const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
4354  generate_arraycopy(adr_type, dest_elem,
4355                     src, src_offset, dest, dest_offset, length,
4356                     false, false, slow_region);
4357
4358  return true;
4359}
4360
4361//-----------------------------generate_arraycopy----------------------
4362// Generate an optimized call to arraycopy.
4363// Caller must guard against non-arrays.
4364// Caller must determine a common array basic-type for both arrays.
4365// Caller must validate offsets against array bounds.
4366// The slow_region has already collected guard failure paths
4367// (such as out of bounds length or non-conformable array types).
4368// The generated code has this shape, in general:
4369//
4370//     if (length == 0)  return   // via zero_path
4371//     slowval = -1
4372//     if (types unknown) {
4373//       slowval = call generic copy loop
4374//       if (slowval == 0)  return  // via checked_path
4375//     } else if (indexes in bounds) {
4376//       if ((is object array) && !(array type check)) {
4377//         slowval = call checked copy loop
4378//         if (slowval == 0)  return  // via checked_path
4379//       } else {
4380//         call bulk copy loop
4381//         return  // via fast_path
4382//       }
4383//     }
4384//     // adjust params for remaining work:
4385//     if (slowval != -1) {
4386//       n = -1^slowval; src_offset += n; dest_offset += n; length -= n
4387//     }
4388//   slow_region:
4389//     call slow arraycopy(src, src_offset, dest, dest_offset, length)
4390//     return  // via slow_call_path
4391//
4392// This routine is used from several intrinsics:  System.arraycopy,
4393// Object.clone (the array subcase), and Arrays.copyOf[Range].
4394//
4395void
4396LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
4397                                   BasicType basic_elem_type,
4398                                   Node* src,  Node* src_offset,
4399                                   Node* dest, Node* dest_offset,
4400                                   Node* copy_length,
4401                                   bool disjoint_bases,
4402                                   bool length_never_negative,
4403                                   RegionNode* slow_region) {
4404
4405  if (slow_region == NULL) {
4406    slow_region = new(C,1) RegionNode(1);
4407    record_for_igvn(slow_region);
4408  }
4409
4410  Node* original_dest      = dest;
4411  AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
4412  bool  must_clear_dest    = false;
4413
4414  // See if this is the initialization of a newly-allocated array.
4415  // If so, we will take responsibility here for initializing it to zero.
4416  // (Note:  Because tightly_coupled_allocation performs checks on the
4417  // out-edges of the dest, we need to avoid making derived pointers
4418  // from it until we have checked its uses.)
4419  if (ReduceBulkZeroing
4420      && !ZeroTLAB              // pointless if already zeroed
4421      && basic_elem_type != T_CONFLICT // avoid corner case
4422      && !_gvn.eqv_uncast(src, dest)
4423      && ((alloc = tightly_coupled_allocation(dest, slow_region))
4424          != NULL)
4425      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
4426      && alloc->maybe_set_complete(&_gvn)) {
4427    // "You break it, you buy it."
4428    InitializeNode* init = alloc->initialization();
4429    assert(init->is_complete(), "we just did this");
4430    assert(dest->is_CheckCastPP(), "sanity");
4431    assert(dest->in(0)->in(0) == init, "dest pinned");
4432
4433    // Cast to Object for arraycopy.
4434    // We can't use the original CheckCastPP since it should be moved
4435    // after the arraycopy to prevent stores flowing above it.
4436    Node* new_obj = new(C, 2) CheckCastPPNode(dest->in(0), dest->in(1),
4437                                              TypeInstPtr::NOTNULL);
4438    dest = _gvn.transform(new_obj);
4439    // Substitute in the locally valid dest_oop.
4440    replace_in_map(original_dest, dest);
4441    adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
4442    // From this point on, every exit path is responsible for
4443    // initializing any non-copied parts of the object to zero.
4444    must_clear_dest = true;
4445  } else {
4446    // No zeroing elimination here.
4447    alloc             = NULL;
4448    //original_dest   = dest;
4449    //must_clear_dest = false;
4450  }
4451
4452  // Results are placed here:
4453  enum { fast_path        = 1,  // normal void-returning assembly stub
4454         checked_path     = 2,  // special assembly stub with cleanup
4455         slow_call_path   = 3,  // something went wrong; call the VM
4456         zero_path        = 4,  // bypass when length of copy is zero
4457         bcopy_path       = 5,  // copy primitive array by 64-bit blocks
4458         PATH_LIMIT       = 6
4459  };
4460  RegionNode* result_region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
4461  PhiNode*    result_i_o    = new(C, PATH_LIMIT) PhiNode(result_region, Type::ABIO);
4462  PhiNode*    result_memory = new(C, PATH_LIMIT) PhiNode(result_region, Type::MEMORY, adr_type);
4463  record_for_igvn(result_region);
4464  _gvn.set_type_bottom(result_i_o);
4465  _gvn.set_type_bottom(result_memory);
4466  assert(adr_type != TypePtr::BOTTOM, "must be RawMem or a T[] slice");
4467
4468  // The slow_control path:
4469  Node* slow_control;
4470  Node* slow_i_o = i_o();
4471  Node* slow_mem = memory(adr_type);
4472  debug_only(slow_control = (Node*) badAddress);
4473
4474  // Checked control path:
4475  Node* checked_control = top();
4476  Node* checked_mem     = NULL;
4477  Node* checked_i_o     = NULL;
4478  Node* checked_value   = NULL;
4479
4480  if (basic_elem_type == T_CONFLICT) {
4481    assert(!must_clear_dest, "");
4482    Node* cv = generate_generic_arraycopy(adr_type,
4483                                          src, src_offset, dest, dest_offset,
4484                                          copy_length);
4485    if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4486    checked_control = control();
4487    checked_i_o     = i_o();
4488    checked_mem     = memory(adr_type);
4489    checked_value   = cv;
4490    set_control(top());         // no fast path
4491  }
4492
4493  Node* not_pos = generate_nonpositive_guard(copy_length, length_never_negative);
4494  if (not_pos != NULL) {
4495    PreserveJVMState pjvms(this);
4496    set_control(not_pos);
4497
4498    // (6) length must not be negative.
4499    if (!length_never_negative) {
4500      generate_negative_guard(copy_length, slow_region);
4501    }
4502
4503    // copy_length is 0.
4504    if (!stopped() && must_clear_dest) {
4505      Node* dest_length = alloc->in(AllocateNode::ALength);
4506      if (_gvn.eqv_uncast(copy_length, dest_length)
4507          || _gvn.find_int_con(dest_length, 1) <= 0) {
4508        // There is no zeroing to do. No need for a secondary raw memory barrier.
4509      } else {
4510        // Clear the whole thing since there are no source elements to copy.
4511        generate_clear_array(adr_type, dest, basic_elem_type,
4512                             intcon(0), NULL,
4513                             alloc->in(AllocateNode::AllocSize));
4514        // Use a secondary InitializeNode as raw memory barrier.
4515        // Currently it is needed only on this path since other
4516        // paths have stub or runtime calls as raw memory barriers.
4517        InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
4518                                                       Compile::AliasIdxRaw,
4519                                                       top())->as_Initialize();
4520        init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
4521      }
4522    }
4523
4524    // Present the results of the zero-length path.
4525    result_region->init_req(zero_path, control());
4526    result_i_o   ->init_req(zero_path, i_o());
4527    result_memory->init_req(zero_path, memory(adr_type));
4528  }
4529
4530  if (!stopped() && must_clear_dest) {
4531    // We have to initialize the *uncopied* part of the array to zero.
4532    // The copy destination is the slice dest[off..off+len].  The other slices
4533    // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length].
4534    Node* dest_size   = alloc->in(AllocateNode::AllocSize);
4535    Node* dest_length = alloc->in(AllocateNode::ALength);
4536    Node* dest_tail   = _gvn.transform( new(C,3) AddINode(dest_offset,
4537                                                          copy_length) );
4538
4539    // If there is a head section that needs zeroing, do it now.
4540    if (find_int_con(dest_offset, -1) != 0) {
4541      generate_clear_array(adr_type, dest, basic_elem_type,
4542                           intcon(0), dest_offset,
4543                           NULL);
4544    }
4545
4546    // Next, perform a dynamic check on the tail length.
4547    // It is often zero, and we can win big if we prove this.
4548    // There are two wins:  Avoid generating the ClearArray
4549    // with its attendant messy index arithmetic, and upgrade
4550    // the copy to a more hardware-friendly word size of 64 bits.
4551    Node* tail_ctl = NULL;
4552    if (!stopped() && !_gvn.eqv_uncast(dest_tail, dest_length)) {
4553      Node* cmp_lt   = _gvn.transform( new(C,3) CmpINode(dest_tail, dest_length) );
4554      Node* bol_lt   = _gvn.transform( new(C,2) BoolNode(cmp_lt, BoolTest::lt) );
4555      tail_ctl = generate_slow_guard(bol_lt, NULL);
4556      assert(tail_ctl != NULL || !stopped(), "must be an outcome");
4557    }
4558
4559    // At this point, let's assume there is no tail.
4560    if (!stopped() && alloc != NULL && basic_elem_type != T_OBJECT) {
4561      // There is no tail.  Try an upgrade to a 64-bit copy.
4562      bool didit = false;
4563      { PreserveJVMState pjvms(this);
4564        didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc,
4565                                         src, src_offset, dest, dest_offset,
4566                                         dest_size);
4567        if (didit) {
4568          // Present the results of the block-copying fast call.
4569          result_region->init_req(bcopy_path, control());
4570          result_i_o   ->init_req(bcopy_path, i_o());
4571          result_memory->init_req(bcopy_path, memory(adr_type));
4572        }
4573      }
4574      if (didit)
4575        set_control(top());     // no regular fast path
4576    }
4577
4578    // Clear the tail, if any.
4579    if (tail_ctl != NULL) {
4580      Node* notail_ctl = stopped() ? NULL : control();
4581      set_control(tail_ctl);
4582      if (notail_ctl == NULL) {
4583        generate_clear_array(adr_type, dest, basic_elem_type,
4584                             dest_tail, NULL,
4585                             dest_size);
4586      } else {
4587        // Make a local merge.
4588        Node* done_ctl = new(C,3) RegionNode(3);
4589        Node* done_mem = new(C,3) PhiNode(done_ctl, Type::MEMORY, adr_type);
4590        done_ctl->init_req(1, notail_ctl);
4591        done_mem->init_req(1, memory(adr_type));
4592        generate_clear_array(adr_type, dest, basic_elem_type,
4593                             dest_tail, NULL,
4594                             dest_size);
4595        done_ctl->init_req(2, control());
4596        done_mem->init_req(2, memory(adr_type));
4597        set_control( _gvn.transform(done_ctl) );
4598        set_memory(  _gvn.transform(done_mem), adr_type );
4599      }
4600    }
4601  }
4602
4603  BasicType copy_type = basic_elem_type;
4604  assert(basic_elem_type != T_ARRAY, "caller must fix this");
4605  if (!stopped() && copy_type == T_OBJECT) {
4606    // If src and dest have compatible element types, we can copy bits.
4607    // Types S[] and D[] are compatible if D is a supertype of S.
4608    //
4609    // If they are not, we will use checked_oop_disjoint_arraycopy,
4610    // which performs a fast optimistic per-oop check, and backs off
4611    // further to JVM_ArrayCopy on the first per-oop check that fails.
4612    // (Actually, we don't move raw bits only; the GC requires card marks.)
4613
4614    // Get the klassOop for both src and dest
4615    Node* src_klass  = load_object_klass(src);
4616    Node* dest_klass = load_object_klass(dest);
4617
4618    // Generate the subtype check.
4619    // This might fold up statically, or then again it might not.
4620    //
4621    // Non-static example:  Copying List<String>.elements to a new String[].
4622    // The backing store for a List<String> is always an Object[],
4623    // but its elements are always type String, if the generic types
4624    // are correct at the source level.
4625    //
4626    // Test S[] against D[], not S against D, because (probably)
4627    // the secondary supertype cache is less busy for S[] than S.
4628    // This usually only matters when D is an interface.
4629    Node* not_subtype_ctrl = gen_subtype_check(src_klass, dest_klass);
4630    // Plug failing path into checked_oop_disjoint_arraycopy
4631    if (not_subtype_ctrl != top()) {
4632      PreserveJVMState pjvms(this);
4633      set_control(not_subtype_ctrl);
4634      // (At this point we can assume disjoint_bases, since types differ.)
4635      int ek_offset = objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc);
4636      Node* p1 = basic_plus_adr(dest_klass, ek_offset);
4637      Node* n1 = LoadKlassNode::make(_gvn, immutable_memory(), p1, TypeRawPtr::BOTTOM);
4638      Node* dest_elem_klass = _gvn.transform(n1);
4639      Node* cv = generate_checkcast_arraycopy(adr_type,
4640                                              dest_elem_klass,
4641                                              src, src_offset, dest, dest_offset,
4642                                              copy_length);
4643      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
4644      checked_control = control();
4645      checked_i_o     = i_o();
4646      checked_mem     = memory(adr_type);
4647      checked_value   = cv;
4648    }
4649    // At this point we know we do not need type checks on oop stores.
4650
4651    // Let's see if we need card marks:
4652    if (alloc != NULL && use_ReduceInitialCardMarks()) {
4653      // If we do not need card marks, copy using the jint or jlong stub.
4654      copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT);
4655      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
4656             "sizes agree");
4657    }
4658  }
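    // (With compressed oops each heap reference is 4 raw bytes, so the
    // jint stub moves exactly the same bits; with full-width oops on LP64
    // the jlong stub is used.  Either way the element sizes agree, as the
    // assert above verifies.)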
4659
4660  if (!stopped()) {
4661    // Generate the fast path, if possible.
4662    PreserveJVMState pjvms(this);
4663    generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases,
4664                                 src, src_offset, dest, dest_offset,
4665                                 ConvI2X(copy_length));
4666
4667    // Present the results of the fast call.
4668    result_region->init_req(fast_path, control());
4669    result_i_o   ->init_req(fast_path, i_o());
4670    result_memory->init_req(fast_path, memory(adr_type));
4671  }
4672
4673  // Here are all the slow paths up to this point, in one bundle:
4674  slow_control = top();
4675  if (slow_region != NULL)
4676    slow_control = _gvn.transform(slow_region);
4677  debug_only(slow_region = (RegionNode*)badAddress);
4678
4679  set_control(checked_control);
4680  if (!stopped()) {
4681    // Clean up after the checked call.
4682    // The returned value is either 0 or -1^K,
4683    // where K = number of partially transferred array elements.
4684    Node* cmp = _gvn.transform( new(C, 3) CmpINode(checked_value, intcon(0)) );
4685    Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) );
4686    IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
4687
4688    // If it is 0, we are done, so transfer to the end.
4689    Node* checks_done = _gvn.transform( new(C, 1) IfTrueNode(iff) );
4690    result_region->init_req(checked_path, checks_done);
4691    result_i_o   ->init_req(checked_path, checked_i_o);
4692    result_memory->init_req(checked_path, checked_mem);
4693
4694    // If it is not zero, merge into the slow call.
4695    set_control( _gvn.transform( new(C, 1) IfFalseNode(iff) ));
4696    RegionNode* slow_reg2 = new(C, 3) RegionNode(3);
4697    PhiNode*    slow_i_o2 = new(C, 3) PhiNode(slow_reg2, Type::ABIO);
4698    PhiNode*    slow_mem2 = new(C, 3) PhiNode(slow_reg2, Type::MEMORY, adr_type);
4699    record_for_igvn(slow_reg2);
4700    slow_reg2  ->init_req(1, slow_control);
4701    slow_i_o2  ->init_req(1, slow_i_o);
4702    slow_mem2  ->init_req(1, slow_mem);
4703    slow_reg2  ->init_req(2, control());
4704    slow_i_o2  ->init_req(2, checked_i_o);
4705    slow_mem2  ->init_req(2, checked_mem);
4706
4707    slow_control = _gvn.transform(slow_reg2);
4708    slow_i_o     = _gvn.transform(slow_i_o2);
4709    slow_mem     = _gvn.transform(slow_mem2);
4710
4711    if (alloc != NULL) {
4712      // We'll restart from the very beginning, after zeroing the whole thing.
4713      // This can cause double writes, but that's OK since dest is brand new.
4714      // So we ignore the low 31 bits of the value returned from the stub.
4715    } else {
4716      // We must continue the copy exactly where it failed, or else
4717      // another thread might see the wrong number of writes to dest.
4718      Node* checked_offset = _gvn.transform( new(C, 3) XorINode(checked_value, intcon(-1)) );
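      // (Example: if the stub stopped after transferring K == 3 elements,
      // checked_value == -1^3 == -4, and XOR-ing with -1 recovers
      // checked_offset == 3, the element where the slow path must resume.)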
4719      Node* slow_offset    = new(C, 3) PhiNode(slow_reg2, TypeInt::INT);
4720      slow_offset->init_req(1, intcon(0));
4721      slow_offset->init_req(2, checked_offset);
4722      slow_offset  = _gvn.transform(slow_offset);
4723
4724      // Adjust the arguments by the conditionally incoming offset.
4725      Node* src_off_plus  = _gvn.transform( new(C, 3) AddINode(src_offset,  slow_offset) );
4726      Node* dest_off_plus = _gvn.transform( new(C, 3) AddINode(dest_offset, slow_offset) );
4727      Node* length_minus  = _gvn.transform( new(C, 3) SubINode(copy_length, slow_offset) );
4728
4729      // Tweak the node variables to adjust the code produced below:
4730      src_offset  = src_off_plus;
4731      dest_offset = dest_off_plus;
4732      copy_length = length_minus;
4733    }
4734  }
4735
4736  set_control(slow_control);
4737  if (!stopped()) {
4738    // Generate the slow path, if needed.
4739    PreserveJVMState pjvms(this);   // replace_in_map may trash the map
4740
4741    set_memory(slow_mem, adr_type);
4742    set_i_o(slow_i_o);
4743
4744    if (must_clear_dest) {
4745      generate_clear_array(adr_type, dest, basic_elem_type,
4746                           intcon(0), NULL,
4747                           alloc->in(AllocateNode::AllocSize));
4748    }
4749
4750    generate_slow_arraycopy(adr_type,
4751                            src, src_offset, dest, dest_offset,
4752                            copy_length);
4753
4754    result_region->init_req(slow_call_path, control());
4755    result_i_o   ->init_req(slow_call_path, i_o());
4756    result_memory->init_req(slow_call_path, memory(adr_type));
4757  }
4758
4759  // Remove unused edges.
4760  for (uint i = 1; i < result_region->req(); i++) {
4761    if (result_region->in(i) == NULL)
4762      result_region->init_req(i, top());
4763  }
4764
4765  // Finished; return the combined state.
4766  set_control( _gvn.transform(result_region) );
4767  set_i_o(     _gvn.transform(result_i_o)    );
4768  set_memory(  _gvn.transform(result_memory), adr_type );
4769
4770  if (dest != original_dest) {
4771    // Pin the "finished" array node after the arraycopy/zeroing operations.
4772    _gvn.hash_delete(original_dest);
4773    original_dest->set_req(0, control());
4774    // Replace raw memory edge with new CheckCastPP to have a live oop
4775    // at safepoints instead of raw value.
4776    assert(dest->is_CheckCastPP() && dest->in(1) == original_dest->in(1), "sanity");
4777    original_dest->set_req(1, dest);       // cast to the original type
4778    _gvn.hash_find_insert(original_dest);  // put back into GVN table
4779    // Restore in the locally valid dest_oop.
4780    replace_in_map(dest, original_dest);
4781  }
4782  // The memory edges above are precise in order to model effects around
4783  // array copies accurately to allow value numbering of field loads around
4784  // arraycopy.  Such field loads, both before and after, are common in Java
4785  // collections and similar classes involving header/array data structures.
4786  //
4787  // But with a low number of registers, or when some registers are used or
4788  // killed by the arraycopy calls, this causes register spilling on the stack.
4789  // See bug 6544710.  The next memory barrier is added to avoid that.  If the
4790  // arraycopy can be optimized away (which it sometimes can), then we can
4791  // manually remove the membar also.
4792  if (InsertMemBarAfterArraycopy)
4793    insert_mem_bar(Op_MemBarCPUOrder);
4794}
4795
4796
4797// Helper function which determines if an arraycopy immediately follows
4798// an allocation, with no intervening tests or other escapes for the object.
4799AllocateArrayNode*
4800LibraryCallKit::tightly_coupled_allocation(Node* ptr,
4801                                           RegionNode* slow_region) {
4802  if (stopped())             return NULL;  // no fast path
4803  if (C->AliasLevel() == 0)  return NULL;  // no MergeMems around
4804
4805  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn);
4806  if (alloc == NULL)  return NULL;
4807
4808  Node* rawmem = memory(Compile::AliasIdxRaw);
4809  // Is the allocation's memory state untouched?
4810  if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) {
4811    // Bail out if there have been raw-memory effects since the allocation.
4812    // (Example:  There might have been a call or safepoint.)
4813    return NULL;
4814  }
4815  rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw);
4816  if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) {
4817    return NULL;
4818  }
4819
4820  // There must be no unexpected observers of this allocation.
4821  for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) {
4822    Node* obs = ptr->fast_out(i);
4823    if (obs != this->map()) {
4824      return NULL;
4825    }
4826  }
4827
4828  // This arraycopy must unconditionally follow the allocation of the ptr.
4829  Node* alloc_ctl = ptr->in(0);
4830  assert(just_allocated_object(alloc_ctl) == ptr, "most recent allo");
4831
4832  Node* ctl = control();
4833  while (ctl != alloc_ctl) {
4834    // There may be guards which feed into the slow_region.
4835    // Any other control flow means that we might not get a chance
4836    // to finish initializing the allocated object.
4837    if ((ctl->is_IfFalse() || ctl->is_IfTrue()) && ctl->in(0)->is_If()) {
4838      IfNode* iff = ctl->in(0)->as_If();
4839      Node* not_ctl = iff->proj_out(1 - ctl->as_Proj()->_con);
4840      assert(not_ctl != NULL && not_ctl != ctl, "found alternate");
4841      if (slow_region != NULL && slow_region->find_edge(not_ctl) >= 1) {
4842        ctl = iff->in(0);       // This test feeds the known slow_region.
4843        continue;
4844      }
4845      // One more try:  Various low-level checks bottom out in
4846      // uncommon traps.  If the debug-info of the trap omits
4847      // any reference to the allocation, as we've already
4848      // observed, then there can be no objection to the trap.
4849      bool found_trap = false;
4850      for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) {
4851        Node* obs = not_ctl->fast_out(j);
4852        if (obs->in(0) == not_ctl && obs->is_Call() &&
4853            (obs->as_Call()->entry_point() ==
4854             SharedRuntime::uncommon_trap_blob()->instructions_begin())) {
4855          found_trap = true; break;
4856        }
4857      }
4858      if (found_trap) {
4859        ctl = iff->in(0);       // This test feeds a harmless uncommon trap.
4860        continue;
4861      }
4862    }
4863    return NULL;
4864  }
4865
4866  // If we get this far, we have an allocation which immediately
4867  // precedes the arraycopy, and we can take over zeroing the new object.
4868  // The arraycopy will finish the initialization, and provide
4869  // a new control state to which we will anchor the destination pointer.
4870
4871  return alloc;
4872}
4873
4874// Helper for initialization of arrays, creating a ClearArray.
4875// It writes zero bits in [start..end), within the body of an array object.
4876// The memory effects are all chained onto the 'adr_type' alias category.
4877//
4878// Since the object is otherwise uninitialized, we are free
4879// to put a little "slop" around the edges of the cleared area,
4880// as long as it does not go back into the array's header,
4881// or beyond the array end within the heap.
4882//
4883// The lower edge can be rounded down to the nearest jint and the
4884// upper edge can be rounded up to the nearest MinObjAlignmentInBytes.
4885//
4886// Arguments:
4887//   adr_type           memory slice where writes are generated
4888//   dest               oop of the destination array
4889//   basic_elem_type    element type of the destination
4890//   slice_idx          array index of first element to store
4891//   slice_len          number of elements to store (or NULL)
4892//   dest_size          total size in bytes of the array object
4893//
4894// Exactly one of slice_len or dest_size must be non-NULL.
4895// If dest_size is non-NULL, zeroing extends to the end of the object.
4896// If slice_len is non-NULL, the slice_idx value must be a constant.
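//
// For instance, the head-zeroing call in generate_arraycopy above,
//   generate_clear_array(adr_type, dest, basic_elem_type,
//                        intcon(0), dest_offset, NULL),
// zeroes the head slice dest[0..dest_offset), whereas passing a non-NULL
// dest_size instead extends the zeroing from slice_idx to the end of the
// object.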
4897void
4898LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
4899                                     Node* dest,
4900                                     BasicType basic_elem_type,
4901                                     Node* slice_idx,
4902                                     Node* slice_len,
4903                                     Node* dest_size) {
4904  // one or the other but not both of slice_len and dest_size:
4905  assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, "");
4906  if (slice_len == NULL)  slice_len = top();
4907  if (dest_size == NULL)  dest_size = top();
4908
4909  // operate on this memory slice:
4910  Node* mem = memory(adr_type); // memory slice to operate on
4911
4912  // scaling and rounding of indexes:
4913  int scale = exact_log2(type2aelembytes(basic_elem_type));
4914  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
4915  int clear_low = (-1 << scale) & (BytesPerInt  - 1);
4916  int bump_bit  = (-1 << scale) & BytesPerInt;
4917
4918  // determine constant starts and ends
4919  const intptr_t BIG_NEG = -128;
4920  assert(BIG_NEG + 2*abase < 0, "neg enough");
4921  intptr_t slice_idx_con = (intptr_t) find_int_con(slice_idx, BIG_NEG);
4922  intptr_t slice_len_con = (intptr_t) find_int_con(slice_len, BIG_NEG);
4923  if (slice_len_con == 0) {
4924    return;                     // nothing to do here
4925  }
4926  intptr_t start_con = (abase + (slice_idx_con << scale)) & ~clear_low;
4927  intptr_t end_con   = find_intptr_t_con(dest_size, -1);
4928  if (slice_idx_con >= 0 && slice_len_con >= 0) {
4929    assert(end_con < 0, "not two cons");
4930    end_con = round_to(abase + ((slice_idx_con + slice_len_con) << scale),
4931                       BytesPerLong);
4932  }
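  // (E.g., zeroing 5 ints from index 0 with abase == 16 -- illustrative --
  //  gives end_con == round_to(16 + 5*4, 8) == 40: four bytes of slop past
  //  the last element are cleared as well, which is harmless inside a
  //  freshly allocated object, as the header comment explains.)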
4933
4934  if (start_con >= 0 && end_con >= 0) {
4935    // Constant start and end.  Simple.
4936    mem = ClearArrayNode::clear_memory(control(), mem, dest,
4937                                       start_con, end_con, &_gvn);
4938  } else if (start_con >= 0 && dest_size != top()) {
4939    // Constant start, pre-rounded end after the tail of the array.
4940    Node* end = dest_size;
4941    mem = ClearArrayNode::clear_memory(control(), mem, dest,
4942                                       start_con, end, &_gvn);
4943  } else if (start_con >= 0 && slice_len != top()) {
4944    // Constant start, non-constant end.  End needs rounding up.
4945    // End offset = round_up(abase + ((slice_idx_con + slice_len) << scale), 8)
4946    intptr_t end_base  = abase + (slice_idx_con << scale);
4947    int      end_round = (-1 << scale) & (BytesPerLong  - 1);
4948    Node*    end       = ConvI2X(slice_len);
4949    if (scale != 0)
4950      end = _gvn.transform( new(C,3) LShiftXNode(end, intcon(scale) ));
4951    end_base += end_round;
4952    end = _gvn.transform( new(C,3) AddXNode(end, MakeConX(end_base)) );
4953    end = _gvn.transform( new(C,3) AndXNode(end, MakeConX(~end_round)) );
4954    mem = ClearArrayNode::clear_memory(control(), mem, dest,
4955                                       start_con, end, &_gvn);
4956  } else if (start_con < 0 && dest_size != top()) {
4957    // Non-constant start, pre-rounded end after the tail of the array.
4958    // This is almost certainly a "round-to-end" operation.
4959    Node* start = slice_idx;
4960    start = ConvI2X(start);
4961    if (scale != 0)
4962      start = _gvn.transform( new(C,3) LShiftXNode( start, intcon(scale) ));
4963    start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(abase)) );
4964    if ((bump_bit | clear_low) != 0) {
4965      int to_clear = (bump_bit | clear_low);
4966      // Align up mod 8, then store a jint zero unconditionally
4967      // just before the mod-8 boundary.
4968      if (((abase + bump_bit) & ~to_clear) - bump_bit
4969          < arrayOopDesc::length_offset_in_bytes() + BytesPerInt) {
4970        bump_bit = 0;
4971        assert((abase & to_clear) == 0, "array base must be long-aligned");
4972      } else {
4973        // Bump 'start' up to (or past) the next jint boundary:
4974        start = _gvn.transform( new(C,3) AddXNode(start, MakeConX(bump_bit)) );
4975        assert((abase & clear_low) == 0, "array base must be int-aligned");
4976      }
4977      // Round bumped 'start' down to jlong boundary in body of array.
4978      start = _gvn.transform( new(C,3) AndXNode(start, MakeConX(~to_clear)) );
4979      if (bump_bit != 0) {
4980        // Store a zero to the immediately preceding jint:
4981        Node* x1 = _gvn.transform( new(C,3) AddXNode(start, MakeConX(-bump_bit)) );
4982        Node* p1 = basic_plus_adr(dest, x1);
4983        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
4984        mem = _gvn.transform(mem);
4985      }
4986    }
4987    Node* end = dest_size; // pre-rounded
4988    mem = ClearArrayNode::clear_memory(control(), mem, dest,
4989                                       start, end, &_gvn);
4990  } else {
4991    // Non-constant start, unrounded non-constant end.
4992    // (Nobody zeroes a random midsection of an array using this routine.)
4993    ShouldNotReachHere();       // fix caller
4994  }
4995
4996  // Done.
4997  set_memory(mem, adr_type);
4998}
4999
5000
5001bool
5002LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
5003                                         BasicType basic_elem_type,
5004                                         AllocateNode* alloc,
5005                                         Node* src,  Node* src_offset,
5006                                         Node* dest, Node* dest_offset,
5007                                         Node* dest_size) {
5008  // See if there is an advantage from block transfer.
5009  int scale = exact_log2(type2aelembytes(basic_elem_type));
5010  if (scale >= LogBytesPerLong)
5011    return false;               // it is already a block transfer
5012
5013  // Look at the alignment of the starting offsets.
5014  int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
5015  const intptr_t BIG_NEG = -128;
5016  assert(BIG_NEG + 2*abase < 0, "neg enough");
5017
5018  intptr_t src_off  = abase + ((intptr_t) find_int_con(src_offset, -1)  << scale);
5019  intptr_t dest_off = abase + ((intptr_t) find_int_con(dest_offset, -1) << scale);
5020  if (src_off < 0 || dest_off < 0)
5021    // At present, we can only understand constants.
5022    return false;
5023
5024  if (((src_off | dest_off) & (BytesPerLong-1)) != 0) {
5025    // Non-aligned; too bad.
5026    // One more chance:  Pick off an initial 32-bit word.
5027    // This is a common case, since abase need not be a multiple of 8.
5028    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
5029        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
5030      Node* sptr = basic_plus_adr(src,  src_off);
5031      Node* dptr = basic_plus_adr(dest, dest_off);
5032      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
5033      store_to_memory(control(), dptr, sval, T_INT, adr_type);
5034      src_off += BytesPerInt;
5035      dest_off += BytesPerInt;
5036    } else {
5037      return false;
5038    }
5039  }
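  // (Example: if src_off == dest_off == 12, both are 4 mod 8 and their XOR
  //  is 0 mod 8, so one jint is copied and both offsets advance to 16,
  //  which is long-aligned; the 8-byte block copy below then proceeds.)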
5040  assert(src_off % BytesPerLong == 0, "");
5041  assert(dest_off % BytesPerLong == 0, "");
5042
5043  // Do this copy by giant steps.
5044  Node* sptr  = basic_plus_adr(src,  src_off);
5045  Node* dptr  = basic_plus_adr(dest, dest_off);
5046  Node* countx = dest_size;
5047  countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(dest_off)) );
5048  countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong)) );
5049
5050  bool disjoint_bases = true;   // since alloc != NULL
5051  generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases,
5052                               sptr, NULL, dptr, NULL, countx);
5053
5054  return true;
5055}
5056
5057
5058// Helper function; generates code for the slow case.
5059// We make a call to a runtime method which emulates the native method,
5060// but without the native wrapper overhead.
5061void
5062LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
5063                                        Node* src,  Node* src_offset,
5064                                        Node* dest, Node* dest_offset,
5065                                        Node* copy_length) {
5066  Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
5067                                 OptoRuntime::slow_arraycopy_Type(),
5068                                 OptoRuntime::slow_arraycopy_Java(),
5069                                 "slow_arraycopy", adr_type,
5070                                 src, src_offset, dest, dest_offset,
5071                                 copy_length);
5072
5073  // Handle exceptions thrown by this fellow:
5074  make_slow_call_ex(call, env()->Throwable_klass(), false);
5075}
5076
5077// Helper function; generates code for cases requiring runtime checks.
5078Node*
5079LibraryCallKit::generate_checkcast_arraycopy(const TypePtr* adr_type,
5080                                             Node* dest_elem_klass,
5081                                             Node* src,  Node* src_offset,
5082                                             Node* dest, Node* dest_offset,
5083                                             Node* copy_length) {
5084  if (stopped())  return NULL;
5085
5086  address copyfunc_addr = StubRoutines::checkcast_arraycopy();
5087  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5088    return NULL;
5089  }
5090
5091  // Pick out the parameters required to perform a store-check
5092  // for the target array.  This is an optimistic check.  It will
5093  // look in each non-null element's class, at the desired klass's
5094  // super_check_offset, for the desired klass.
5095  int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc);
5096  Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
5097  Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM);
5098  Node* check_offset = _gvn.transform(n3);
5099  Node* check_value  = dest_elem_klass;
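  // (For each copied oop, the stub presumably loads the word at
  //  element_klass + check_offset and compares it with check_value; a
  //  match means the element is a subtype of the destination's element
  //  klass, mirroring the usual fast subtype check.)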
5100
5101  Node* src_start  = array_element_address(src,  src_offset,  T_OBJECT);
5102  Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
5103
5104  // (We know the arrays are never conjoint, because their types differ.)
5105  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5106                                 OptoRuntime::checkcast_arraycopy_Type(),
5107                                 copyfunc_addr, "checkcast_arraycopy", adr_type,
5108                                 // five arguments, of which two are
5109                                 // intptr_t (jlong in LP64)
5110                                 src_start, dest_start,
5111                                 copy_length XTOP,
5112                                 check_offset XTOP,
5113                                 check_value);
5114
5115  return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
5116}
5117
5118
5119// Helper function; generates code for cases requiring runtime checks.
5120Node*
5121LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
5122                                           Node* src,  Node* src_offset,
5123                                           Node* dest, Node* dest_offset,
5124                                           Node* copy_length) {
5125  if (stopped())  return NULL;
5126
5127  address copyfunc_addr = StubRoutines::generic_arraycopy();
5128  if (copyfunc_addr == NULL) { // Stub was not generated, go slow path.
5129    return NULL;
5130  }
5131
5132  Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
5133                    OptoRuntime::generic_arraycopy_Type(),
5134                    copyfunc_addr, "generic_arraycopy", adr_type,
5135                    src, src_offset, dest, dest_offset, copy_length);
5136
5137  return _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms));
5138}
5139
5140// Helper function; generates the fast out-of-line call to an arraycopy stub.
5141void
5142LibraryCallKit::generate_unchecked_arraycopy(const TypePtr* adr_type,
5143                                             BasicType basic_elem_type,
5144                                             bool disjoint_bases,
5145                                             Node* src,  Node* src_offset,
5146                                             Node* dest, Node* dest_offset,
5147                                             Node* copy_length) {
5148  if (stopped())  return;               // nothing to do
5149
5150  Node* src_start  = src;
5151  Node* dest_start = dest;
5152  if (src_offset != NULL || dest_offset != NULL) {
5153    assert(src_offset != NULL && dest_offset != NULL, "");
5154    src_start  = array_element_address(src,  src_offset,  basic_elem_type);
5155    dest_start = array_element_address(dest, dest_offset, basic_elem_type);
5156  }
5157
5158  // Figure out which arraycopy runtime method to call.
5159  const char* copyfunc_name = "arraycopy";
5160  address     copyfunc_addr =
5161      basictype2arraycopy(basic_elem_type, src_offset, dest_offset,
5162                          disjoint_bases, copyfunc_name);
5163
5164  // Call it.  Note that the copy_length value is not scaled to a byte size.
5165  make_runtime_call(RC_LEAF|RC_NO_FP,
5166                    OptoRuntime::fast_arraycopy_Type(),
5167                    copyfunc_addr, copyfunc_name, adr_type,
5168                    src_start, dest_start, copy_length XTOP);
5169}
5170