parse3.cpp revision 9114:0300297e7df3
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to deoptimize as soon as we execute.
  assert(field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>,
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside the initializer.
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;
}
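
// Illustrative example (not part of this file): given
//
//   class A { static int x;  static { x = 1; } }    // store in A.<clinit>
//   class B extends A {      static { A.x = 2; } }  // store in B.<clinit>
//   class C { static void m() { A.x = 3; } }        // plain static method
//
// the stores in A.<clinit> and B.<clinit> pass the predicate above (B is a
// subclass of the field holder), while compiling C.m() before A has been
// initialized fails it, and the caller emits an uncommon trap instead.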


void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError.
    // Check this before allowing <clinit> methods to access static fields.
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  // Deoptimize on putfield writes to call site target field.
  if (!is_get && field->is_call_site_target()) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_reinterpret,
                  NULL, "put to call site target field");
    return;
  }
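
  // (Background, hedged: a java.lang.invoke.CallSite's target field is
  // treated as effectively constant so that invokedynamic call sites can be
  // inlined; a compiled write to it would invalidate that assumption, which
  // is why a putfield to it deoptimizes above instead of emitting a store.)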

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = null_check(peek(obj_depth));
    // Compile-time detection of a null exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr* tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      (void) pop();  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      (void) pop();  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}
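
// Illustrative operand-stack shapes for do_field_access (not from this file):
//
//   getfield:  ..., objectref         -> receiver is at depth 0
//   putfield:  ..., objectref, value  -> receiver is at depth field->type()->size(),
//                                        i.e. 1 slot, or 2 for long/double
//
// which is why the receiver is peeked at obj_depth before the null check.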


void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    // final or stable field
    const Type* con_type = Type::make_constant(field, obj);
    if (con_type != NULL) {
      push_node(con_type->basic_type(), makecon(con_type));
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load.
  const Type* type;

  bool must_assert_null = false;

  if (bt == T_OBJECT) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);

  // Adjust the Java stack: longs and doubles occupy two slots.
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If the field is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (is_vol) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}
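
// Rough sketch of the IR produced above for a volatile getfield (hedged; the
// exact nodes depend on the field type and platform):
//
//   [MemBarVolatile, IRIW cpus only] -> LoadX(adr, mo=acquire) -> MemBarAcquire
//
// i.e. an acquiring load followed by an acquire barrier, so that later memory
// operations cannot float above the volatile read.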

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If the field is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If the field is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember we wrote a volatile field.
    // For a non-multiple-copy-atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores.  See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  if (is_field) {
    set_wrote_fields(true);
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve the allocation ptr to create a precedence edge to it in the
    // membar generated on exit from the constructor.
    if (C->eliminate_boxing() &&
        adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&
        AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}
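
// Rough sketch of the IR produced above for a volatile putfield (hedged; the
// exact nodes depend on the field type and platform):
//
//   MemBarRelease -> StoreX(adr, mo=release) -> [MemBarVolatile, non-IRIW cpus]
//
// the leading release barrier orders earlier accesses before the store, and
// the trailing fat membar keeps later volatile ops from floating above it.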

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon-trap when the element class is not loaded: we need the loaded
  // class for the rest of the graph, but we must not initialize the
  // container class (see the Java spec)!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that the array_klass object is loaded.
  if (!array_klass->is_loaded()) {
    // Generate an uncommon trap for the unloaded array_klass.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}
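
// Illustrative source/bytecode pairing (not from this file): the expression
//
//   new String[n]
//
// compiles to "anewarray java/lang/String" and arrives at do_anewarray()
// above, which allocates via new_array() using the ciObjArrayKlass for
// [Ljava/lang/String;.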


void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node* count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node* obj = new_array(makecon(array_klass), count_val, 1);
  // Push the resultant oop onto the stack.
  push(obj);
}
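
// Similarly (illustrative), "new int[n]" compiles to "newarray 10" (T_INT)
// and arrives at do_newarray(T_INT) above, allocating through the
// ciTypeArrayKlass for [I.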

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr*    adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t    header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
    }
  }
  return array;
}
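
// Worked example (illustrative): for "new int[2][7]", expand_multianewarray
// first allocates the outer int[2][] with new_array; since its length is the
// constant 2, it then recurses twice to build two int[7] sub-arrays, storing
// each at offset header + (i << LogBytesPerHeapOop) in the outer array.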

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // Get the lengths from the stack (first dimension is on top).
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((jint)MultiArrayExpandLimit, 100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-j sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
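
  // Worked example (illustrative, assuming the default MultiArrayExpandLimit
  // of 6): "new T[2][n]" sees dim_con = 2, so expand_fanout = 2 and
  // expand_count = 3 <= 6, and the expansion below is taken.  For
  // "new T[2][3][n]" the second pass computes expand_count = 3 + 2*3 = 9 > 6,
  // so expand_count is reset to 0 and the runtime call path is used instead.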

  // Expand into repeated [a]newarray allocations (instead of calling into the
  // runtime) if there is only one dimension, or if all non-final dimensions
  // are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one-dimensional multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when the AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      // Pass 0 as nargs since the uncommon trap code does not need to restore the stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } // original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  case 1: ShouldNotReachHere(); break;
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  }
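
  // Dedicated runtime entry points exist only for 2 to 5 dimensions; for
  // more, fun stays NULL and the multianewarrayN path below passes the
  // dimension sizes in a freshly allocated int[] rather than as arguments.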
  Node* c = NULL;

  if (fun != NULL) {
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarray_Type(ndimensions),
                          fun, NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          length[0], length[1], length[2],
                          (ndimensions > 2) ? length[3] : NULL,
                          (ndimensions > 3) ? length[4] : NULL);
  } else {
    // Create a Java array holding the dimension sizes.
    Node* dims = NULL;
    { PreserveReexecuteState preexecs(this);
      inc_sp(ndimensions);
      Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);

      // Fill it in with the length values.
      for (j = 0; j < ndimensions; j++) {
        Node* dims_elem = array_element_address(dims, intcon(j), T_INT);
        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
      }
    }

    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
                          OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM,
                          makecon(TypeKlassPtr::make(array_klass)),
                          dims);
  }
  make_slow_call_ex(c, env()->Throwable_klass(), false);

  Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform(new CheckCastPPNode(control(), res, type));
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}