interpreterRuntime.cpp revision 726:be93aad57795
1/*
2 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25#include "incls/_precompiled.incl"
26#include "incls/_interpreterRuntime.cpp.incl"
27
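// UnlockFlagSaver is a small RAII helper: it records the thread's current
// do_not_unlock_if_synchronized flag, clears the flag for the duration of a
// runtime call that may call back into Java (e.g. class loading), and restores
// the saved value when the scope exits.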
28class UnlockFlagSaver {
29  private:
30    JavaThread* _thread;
31    bool _do_not_unlock;
32  public:
33    UnlockFlagSaver(JavaThread* t) {
34      _thread = t;
35      _do_not_unlock = t->do_not_unlock_if_synchronized();
36      t->set_do_not_unlock_if_synchronized(false);
37    }
38    ~UnlockFlagSaver() {
39      _thread->set_do_not_unlock_if_synchronized(_do_not_unlock);
40    }
41};
42
43//------------------------------------------------------------------------------------------------------------------------
44// State accessors
45
46void InterpreterRuntime::set_bcp_and_mdp(address bcp, JavaThread *thread) {
47  last_frame(thread).interpreter_frame_set_bcp(bcp);
48  if (ProfileInterpreter) {
49    // ProfileTraps uses MDOs independently of ProfileInterpreter.
50    // That is why we must check both ProfileInterpreter and mdo != NULL.
51    methodDataOop mdo = last_frame(thread).interpreter_frame_method()->method_data();
52    if (mdo != NULL) {
53      NEEDS_CLEANUP;
54      last_frame(thread).interpreter_frame_set_mdp(mdo->bci_to_dp(last_frame(thread).interpreter_frame_bci()));
55    }
56  }
57}
58
59//------------------------------------------------------------------------------------------------------------------------
60// Constants
61
62
63IRT_ENTRY(void, InterpreterRuntime::ldc(JavaThread* thread, bool wide))
64  // access constant pool
65  constantPoolOop pool = method(thread)->constants();
66  int index = wide ? two_byte_index(thread) : one_byte_index(thread);
67  constantTag tag = pool->tag_at(index);
68
69  if (tag.is_unresolved_klass() || tag.is_klass()) {
70    klassOop klass = pool->klass_at(index, CHECK);
71    oop java_class = klass->klass_part()->java_mirror();
72    thread->set_vm_result(java_class);
73  } else {
74#ifdef ASSERT
75    // If we entered this runtime routine, we believed the tag contained
76    // an unresolved string, an unresolved class or a resolved class.
77    // However, another thread could have resolved the unresolved string
78    // or class by the time we get here.
79    assert(tag.is_unresolved_string() || tag.is_string(), "expected string");
80#endif
81    oop s_oop = pool->string_at(index, CHECK);
82    thread->set_vm_result(s_oop);
83  }
84IRT_END
85
86
87//------------------------------------------------------------------------------------------------------------------------
88// Allocation
89
90IRT_ENTRY(void, InterpreterRuntime::_new(JavaThread* thread, constantPoolOopDesc* pool, int index))
91  klassOop k_oop = pool->klass_at(index, CHECK);
92  instanceKlassHandle klass (THREAD, k_oop);
93
94  // Make sure we are not instantiating an abstract klass
95  klass->check_valid_for_instantiation(true, CHECK);
96
97  // Make sure klass is initialized
98  klass->initialize(CHECK);
99
100  // At this point the class may not be fully initialized
101  // because of recursive initialization. If it is fully
102  // initialized & has_finalizer is not set, we rewrite
103  // it into its fast version (Note: no locking is needed
104  // here since this is an atomic byte write and can be
105  // done more than once).
106  //
107  // Note: In case of classes with has_finalizer we don't
108  //       rewrite since that saves us an extra check in
109  //       the fast version which then would call the
110  //       slow version anyway (and do a call back into
111  //       Java).
112  //       If we have a breakpoint, then we don't rewrite
113  //       because the _breakpoint bytecode would be lost.
114  oop obj = klass->allocate_instance(CHECK);
115  thread->set_vm_result(obj);
116IRT_END
117
118
119IRT_ENTRY(void, InterpreterRuntime::newarray(JavaThread* thread, BasicType type, jint size))
120  oop obj = oopFactory::new_typeArray(type, size, CHECK);
121  thread->set_vm_result(obj);
122IRT_END
123
124
125IRT_ENTRY(void, InterpreterRuntime::anewarray(JavaThread* thread, constantPoolOopDesc* pool, int index, jint size))
126  // Note: no oopHandle for pool & klass needed since they are not used
127  //       anymore after new_objArray() and no GC can happen before.
128  //       (This may have to change if this code changes!)
129  klassOop  klass = pool->klass_at(index, CHECK);
130  objArrayOop obj = oopFactory::new_objArray(klass, size, CHECK);
131  thread->set_vm_result(obj);
132IRT_END
133
134
135IRT_ENTRY(void, InterpreterRuntime::multianewarray(JavaThread* thread, jint* first_size_address))
136  // We may want to pass in more arguments - could make this slightly faster
137  constantPoolOop constants = method(thread)->constants();
138  int          i = two_byte_index(thread);
139  klassOop klass = constants->klass_at(i, CHECK);
140  int   nof_dims = number_of_dimensions(thread);
141  assert(oop(klass)->is_klass(), "not a class");
142  assert(nof_dims >= 1, "multianewarray rank must be nonzero");
143
144  // We must create an array of jints to pass to multi_allocate.
145  ResourceMark rm(thread);
146  const int small_dims = 10;
147  jint dim_array[small_dims];
148  jint *dims = &dim_array[0];
149  if (nof_dims > small_dims) {
150    dims = (jint*) NEW_RESOURCE_ARRAY(jint, nof_dims);
151  }
152  for (int index = 0; index < nof_dims; index++) {
153    // offset from first_size_address is addressed as local[index]
154    int n = Interpreter::local_offset_in_bytes(index)/jintSize;
155    dims[index] = first_size_address[n];
156  }
157  oop obj = arrayKlass::cast(klass)->multi_allocate(nof_dims, dims, CHECK);
158  thread->set_vm_result(obj);
159IRT_END
160
161
162IRT_ENTRY(void, InterpreterRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
163  assert(obj->is_oop(), "must be a valid oop");
164  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
165  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
166IRT_END
167
168
169// Quicken instance-of and check-cast bytecodes
170IRT_ENTRY(void, InterpreterRuntime::quicken_io_cc(JavaThread* thread))
171  // Force resolving; quicken the bytecode
172  int which = two_byte_index(thread);
173  constantPoolOop cpool = method(thread)->constants();
174  // We'd expect to assert that we're only here to quicken bytecodes, but in a multithreaded
175  // program we might have seen an unquickened bytecode in the interpreter while another
176  // thread quickened it before we got here.
177  // assert( cpool->tag_at(which).is_unresolved_klass(), "should only come here to quicken bytecodes" );
178  klassOop klass = cpool->klass_at(which, CHECK);
179  thread->set_vm_result(klass);
180IRT_END
181
182
183//------------------------------------------------------------------------------------------------------------------------
184// Exceptions
185
186// Assume the compiler is (or will be) interested in this event.
187// If necessary, create an MDO to hold the information, and record it.
188void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
189  assert(ProfileTraps, "call me only if profiling");
190  methodHandle trap_method(thread, method(thread));
191  if (trap_method.not_null()) {
192    methodDataHandle trap_mdo(thread, trap_method->method_data());
193    if (trap_mdo.is_null()) {
194      methodOopDesc::build_interpreter_method_data(trap_method, THREAD);
195      if (HAS_PENDING_EXCEPTION) {
196        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
197        CLEAR_PENDING_EXCEPTION;
198      }
199      trap_mdo = methodDataHandle(thread, trap_method->method_data());
200      // and fall through...
201    }
202    if (trap_mdo.not_null()) {
203      // Update per-method count of trap events.  The interpreter
204      // is updating the MDO to simulate the effect of compiler traps.
205      int trap_bci = trap_method->bci_from(bcp(thread));
206      Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
207    }
208  }
209}
210
211static Handle get_preinitialized_exception(klassOop k, TRAPS) {
212  // get klass
213  instanceKlass* klass = instanceKlass::cast(k);
214  assert(klass->is_initialized(),
215         "this klass should have been initialized during VM initialization");
216  // create instance - do not call constructor since we may have no
217  // (java) stack space left (should assert constructor is empty)
218  Handle exception;
219  oop exception_oop = klass->allocate_instance(CHECK_(exception));
220  exception = Handle(THREAD, exception_oop);
221  if (StackTraceInThrowable) {
222    java_lang_Throwable::fill_in_stack_trace(exception);
223  }
224  return exception;
225}
226
227// Special handling for stack overflow: since we don't have any (java) stack
228// space left we use the pre-allocated & pre-initialized StackOverflowError
229// klass to create a stack overflow error instance.  We do not call its
230// constructor for the same reason (it is empty, anyway).
231IRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* thread))
232  Handle exception = get_preinitialized_exception(
233                                 SystemDictionary::StackOverflowError_klass(),
234                                 CHECK);
235  THROW_HANDLE(exception);
236IRT_END
237
238
239IRT_ENTRY(void, InterpreterRuntime::create_exception(JavaThread* thread, char* name, char* message))
240  // lookup exception klass
241  symbolHandle s = oopFactory::new_symbol_handle(name, CHECK);
242  if (ProfileTraps) {
243    if (s == vmSymbols::java_lang_ArithmeticException()) {
244      note_trap(thread, Deoptimization::Reason_div0_check, CHECK);
245    } else if (s == vmSymbols::java_lang_NullPointerException()) {
246      note_trap(thread, Deoptimization::Reason_null_check, CHECK);
247    }
248  }
249  // create exception
250  Handle exception = Exceptions::new_exception(thread, s(), message);
251  thread->set_vm_result(exception());
252IRT_END
253
254
255IRT_ENTRY(void, InterpreterRuntime::create_klass_exception(JavaThread* thread, char* name, oopDesc* obj))
256  ResourceMark rm(thread);
257  const char* klass_name = Klass::cast(obj->klass())->external_name();
258  // lookup exception klass
259  symbolHandle s = oopFactory::new_symbol_handle(name, CHECK);
260  if (ProfileTraps) {
261    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
262  }
263  // create exception, with klass name as detail message
264  Handle exception = Exceptions::new_exception(thread, s(), klass_name);
265  thread->set_vm_result(exception());
266IRT_END
267
268
269IRT_ENTRY(void, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index))
270  char message[jintAsStringSize];
271  // lookup exception klass
272  symbolHandle s = oopFactory::new_symbol_handle(name, CHECK);
273  if (ProfileTraps) {
274    note_trap(thread, Deoptimization::Reason_range_check, CHECK);
275  }
276  // create exception
277  sprintf(message, "%d", index);
278  THROW_MSG(s(), message);
279IRT_END
280
281IRT_ENTRY(void, InterpreterRuntime::throw_ClassCastException(
282  JavaThread* thread, oopDesc* obj))
283
284  ResourceMark rm(thread);
285  char* message = SharedRuntime::generate_class_cast_message(
286    thread, Klass::cast(obj->klass())->external_name());
287
288  if (ProfileTraps) {
289    note_trap(thread, Deoptimization::Reason_class_check, CHECK);
290  }
291
292  // create exception
293  THROW_MSG(vmSymbols::java_lang_ClassCastException(), message);
294IRT_END
295
296// required can be either a MethodType, or a Class (for a single argument)
297// actual (if not null) can be either a MethodHandle, or an arbitrary value (for a single argument)
298IRT_ENTRY(void, InterpreterRuntime::throw_WrongMethodTypeException(JavaThread* thread,
299                                                                   oopDesc* required,
300                                                                   oopDesc* actual)) {
301  ResourceMark rm(thread);
302  char* message = SharedRuntime::generate_wrong_method_type_message(thread, required, actual);
303
304  if (ProfileTraps) {
305    note_trap(thread, Deoptimization::Reason_constraint, CHECK);
306  }
307
308  // create exception
309  THROW_MSG(vmSymbols::java_dyn_WrongMethodTypeException(), message);
310}
311IRT_END
312
313
314
315// exception_handler_for_exception(...) returns the continuation address,
316// the exception oop (via TLS) and sets the bci/bcp for the continuation.
317// The exception oop is returned to make sure it is preserved over GC (it
318// is only on the stack if the exception was thrown explicitly via athrow).
319// During this operation, the expression stack contains the values for the
320// bci where the exception happened. If the exception was propagated back
321// from a call, the expression stack contains the values for the bci at the
322// invoke w/o arguments (i.e., as if one were inside the call).
323IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThread* thread, oopDesc* exception))
324
325  Handle             h_exception(thread, exception);
326  methodHandle       h_method   (thread, method(thread));
327  constantPoolHandle h_constants(thread, h_method->constants());
328  typeArrayHandle    h_extable  (thread, h_method->exception_table());
329  bool               should_repeat;
330  int                handler_bci;
331  int                current_bci = bcp(thread) - h_method->code_base();
332
333  // Need to do this check first since when _do_not_unlock_if_synchronized
334  // is set, we don't want to trigger any classloading which may make calls
335  // into java, or surprisingly find a matching exception handler for bci 0
336  // since at this moment the method hasn't been "officially" entered yet.
337  if (thread->do_not_unlock_if_synchronized()) {
338    ResourceMark rm;
339    assert(current_bci == 0,  "bci isn't zero for do_not_unlock_if_synchronized");
340    thread->set_vm_result(exception);
341#ifdef CC_INTERP
342    return (address) -1;
343#else
344    return Interpreter::remove_activation_entry();
345#endif
346  }
347
348  do {
349    should_repeat = false;
350
351    // assertions
352#ifdef ASSERT
353    assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
354    assert(h_exception->is_oop(), "just checking");
355    // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
356    if (!(h_exception->is_a(SystemDictionary::throwable_klass()))) {
357      if (ExitVMOnVerifyError) vm_exit(-1);
358      ShouldNotReachHere();
359    }
360#endif
361
362    // tracing
363    if (TraceExceptions) {
364      ttyLocker ttyl;
365      ResourceMark rm(thread);
366      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", h_exception->print_value_string(), (address)h_exception());
367      tty->print_cr(" thrown in interpreter method <%s>", h_method->print_value_string());
368      tty->print_cr(" at bci %d for thread " INTPTR_FORMAT, current_bci, thread);
369    }
370// Don't go paging in something which won't be used.
371//     else if (h_extable->length() == 0) {
372//       // disabled for now - interpreter is not using shortcut yet
373//       // (shortcut is not to call runtime if we have no exception handlers)
374//       // warning("performance bug: should not call runtime if method has no exception handlers");
375//     }
376    // for AbortVMOnException flag
377    NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
378
379    // exception handler lookup
380    KlassHandle h_klass(THREAD, h_exception->klass());
381    handler_bci = h_method->fast_exception_handler_bci_for(h_klass, current_bci, THREAD);
382    if (HAS_PENDING_EXCEPTION) {
383      // We threw an exception while trying to find the exception handler.
384      // Transfer the new exception to the exception handle which will
385      // be set into thread local storage, and do another lookup for an
386      // exception handler for this exception, this time starting at the
387      // BCI of the exception handler which caused the exception to be
388      // thrown (bug 4307310).
389      h_exception = Handle(THREAD, PENDING_EXCEPTION);
390      CLEAR_PENDING_EXCEPTION;
391      if (handler_bci >= 0) {
392        current_bci = handler_bci;
393        should_repeat = true;
394      }
395    }
396  } while (should_repeat == true);
397
398  // notify JVMTI of an exception throw; JVMTI will detect if this is a first
399  // time throw or a stack unwinding throw and accordingly notify the debugger
400  if (JvmtiExport::can_post_exceptions()) {
401    JvmtiExport::post_exception_throw(thread, h_method(), bcp(thread), h_exception());
402  }
403
404#ifdef CC_INTERP
405  address continuation = (address)(intptr_t) handler_bci;
406#else
407  address continuation = NULL;
408#endif
409  address handler_pc = NULL;
410  if (handler_bci < 0 || !thread->reguard_stack((address) &continuation)) {
411    // Forward exception to callee (leaving bci/bcp untouched) because (a) no
412    // handler in this method, or (b) after a stack overflow there is not yet
413    // enough stack space available to reprotect the stack.
414#ifndef CC_INTERP
415    continuation = Interpreter::remove_activation_entry();
416#endif
417    // Count this for compilation purposes
418    h_method->interpreter_throwout_increment();
419  } else {
420    // handler in this method => change bci/bcp to handler bci/bcp and continue there
421    handler_pc = h_method->code_base() + handler_bci;
422#ifndef CC_INTERP
423    set_bcp_and_mdp(handler_pc, thread);
424    continuation = Interpreter::dispatch_table(vtos)[*handler_pc];
425#endif
426  }
427  // notify debugger of an exception catch
428  // (this is good for exceptions caught in native methods as well)
429  if (JvmtiExport::can_post_exceptions()) {
430    JvmtiExport::notice_unwind_due_to_exception(thread, h_method(), handler_pc, h_exception(), (handler_pc != NULL));
431  }
432
433  thread->set_vm_result(h_exception());
434  return continuation;
435IRT_END
436
437
438IRT_ENTRY(void, InterpreterRuntime::throw_pending_exception(JavaThread* thread))
439  assert(thread->has_pending_exception(), "must only be called if there's an exception pending");
440  // nothing to do - eventually we should remove this code entirely (see comments @ call sites)
441IRT_END
442
443
444IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodError(JavaThread* thread))
445  THROW(vmSymbols::java_lang_AbstractMethodError());
446IRT_END
447
448
449IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
450  THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
451IRT_END
452
453
454//------------------------------------------------------------------------------------------------------------------------
455// Fields
456//
457
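// Resolve the field referenced by a get/put bytecode and fill in the
// corresponding constant pool cache entry, so that subsequent executions of
// the bytecode take the fast, already-resolved path in the interpreter.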
458IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
459  // resolve field
460  FieldAccessInfo info;
461  constantPoolHandle pool(thread, method(thread)->constants());
462  bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
463
464  {
465    JvmtiHideSingleStepping jhss(thread);
466    LinkResolver::resolve_field(info, pool, two_byte_index(thread),
467                                bytecode, false, CHECK);
468  } // end JvmtiHideSingleStepping
469
470  // check if link resolution caused cpCache to be updated
471  if (already_resolved(thread)) return;
472
473  // compute auxiliary field attributes
474  TosState state  = as_TosState(info.field_type());
475
476  // We need to delay resolving put instructions on final fields
477  // until we actually invoke one. This is required so we throw
478  // exceptions at the correct place. If we do not resolve completely
479  // in the current pass, leaving the put_code set to zero will
480  // cause the next put instruction to reresolve.
481  bool is_put = (bytecode == Bytecodes::_putfield ||
482                 bytecode == Bytecodes::_putstatic);
483  Bytecodes::Code put_code = (Bytecodes::Code)0;
484
485  // We also need to delay resolving getstatic instructions until the
486  // class is initialized.  This is required so that access to the static
487  // field will call the initialization function every time until the class
488  // is completely initialized, as described in section 2.17.5 of the JVM Specification.
489  instanceKlass *klass = instanceKlass::cast(info.klass()->as_klassOop());
490  bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
491                               !klass->is_initialized());
492  Bytecodes::Code get_code = (Bytecodes::Code)0;
493
494
495  if (!uninitialized_static) {
496    get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
497    if (is_put || !info.access_flags().is_final()) {
498      put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
499    }
500  }
501
502  cache_entry(thread)->set_field(
503    get_code,
504    put_code,
505    info.klass(),
506    info.field_index(),
507    info.field_offset(),
508    state,
509    info.access_flags().is_final(),
510    info.access_flags().is_volatile()
511  );
512IRT_END
513
514
515//------------------------------------------------------------------------------------------------------------------------
516// Synchronization
517//
518// The interpreter's synchronization code is factored out so that it can
519// be shared by method invocation and synchronized blocks.
520//%note synchronization_3
521
522static void trace_locking(Handle& h_locking_obj, bool is_locking) {
523  ObjectSynchronizer::trace_locking(h_locking_obj, false, true, is_locking);
524}
525
526
527//%note monitor_1
528IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
529#ifdef ASSERT
530  thread->last_frame().interpreter_frame_verify_monitor(elem);
531#endif
532  if (PrintBiasedLockingStatistics) {
533    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
534  }
535  Handle h_obj(thread, elem->obj());
536  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
537         "must be NULL or an object");
538  if (UseBiasedLocking) {
539    // Retry fast entry if bias is revoked to avoid unnecessary inflation
540    ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
541  } else {
542    ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
543  }
544  assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
545         "must be NULL or an object");
546#ifdef ASSERT
547  thread->last_frame().interpreter_frame_verify_monitor(elem);
548#endif
549IRT_END
550
551
552//%note monitor_1
553IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorexit(JavaThread* thread, BasicObjectLock* elem))
554#ifdef ASSERT
555  thread->last_frame().interpreter_frame_verify_monitor(elem);
556#endif
557  Handle h_obj(thread, elem->obj());
558  assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
559         "must be NULL or an object");
560  if (elem == NULL || h_obj()->is_unlocked()) {
561    THROW(vmSymbols::java_lang_IllegalMonitorStateException());
562  }
563  ObjectSynchronizer::slow_exit(h_obj(), elem->lock(), thread);
564  // Free entry. This must be done here, since a pending exception might be installed on
565  // exit. If it is not cleared, the exception handling code will try to unlock the monitor again.
566  elem->set_obj(NULL);
567#ifdef ASSERT
568  thread->last_frame().interpreter_frame_verify_monitor(elem);
569#endif
570IRT_END
571
572
573IRT_ENTRY(void, InterpreterRuntime::throw_illegal_monitor_state_exception(JavaThread* thread))
574  THROW(vmSymbols::java_lang_IllegalMonitorStateException());
575IRT_END
576
577
578IRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThread* thread))
579  // Returns an exception (normally an IllegalMonitorStateException) to install
580  // into the current thread. The pending_exception flag is cleared so normal
581  // exception handling does not trigger. Any currently installed exception will
582  // be overwritten. This method is called during an exception unwind.
583
584  assert(!HAS_PENDING_EXCEPTION, "no pending exception");
585  Handle exception(thread, thread->vm_result());
586  assert(exception() != NULL, "vm result should be set");
587  thread->set_vm_result(NULL); // clear vm result before continuing (leaving it set may cause memory leaks and assert failures)
588  if (!exception->is_a(SystemDictionary::threaddeath_klass())) {
589    exception = get_preinitialized_exception(
590                       SystemDictionary::IllegalMonitorStateException_klass(),
591                       CATCH);
592  }
593  thread->set_vm_result(exception());
594IRT_END
595
596
597//------------------------------------------------------------------------------------------------------------------------
598// Invokes
599
600IRT_ENTRY(Bytecodes::Code, InterpreterRuntime::get_original_bytecode_at(JavaThread* thread, methodOopDesc* method, address bcp))
601  return method->orig_bytecode_at(method->bci_from(bcp));
602IRT_END
603
604IRT_ENTRY(void, InterpreterRuntime::set_original_bytecode_at(JavaThread* thread, methodOopDesc* method, address bcp, Bytecodes::Code new_code))
605  method->set_orig_bytecode_at(method->bci_from(bcp), new_code);
606IRT_END
607
608IRT_ENTRY(void, InterpreterRuntime::_breakpoint(JavaThread* thread, methodOopDesc* method, address bcp))
609  JvmtiExport::post_raw_breakpoint(thread, method, bcp);
610IRT_END
611
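// Resolve the callee of an invoke bytecode (other than invokedynamic) and
// update the constant pool cache entry with either a direct/vtable method
// entry or, for invokeinterface, an itable index, so that later executions
// dispatch without re-entering the runtime.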
612IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes::Code bytecode))
613  // extract receiver from the outgoing argument list if necessary
614  Handle receiver(thread, NULL);
615  if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) {
616    ResourceMark rm(thread);
617    methodHandle m (thread, method(thread));
618    int bci = m->bci_from(bcp(thread));
619    Bytecode_invoke* call = Bytecode_invoke_at(m, bci);
620    symbolHandle signature (thread, call->signature());
621    receiver = Handle(thread,
622                  thread->last_frame().interpreter_callee_receiver(signature));
623    assert(Universe::heap()->is_in_reserved_or_null(receiver()),
624           "sanity check");
625    assert(receiver.is_null() ||
626           Universe::heap()->is_in_reserved(receiver->klass()),
627           "sanity check");
628  }
629
630  // resolve method
631  CallInfo info;
632  constantPoolHandle pool(thread, method(thread)->constants());
633
634  {
635    JvmtiHideSingleStepping jhss(thread);
636    LinkResolver::resolve_invoke(info, receiver, pool,
637                                 two_byte_index(thread), bytecode, CHECK);
638    if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
639      int retry_count = 0;
640      while (info.resolved_method()->is_old()) {
641        // It is very unlikely that method is redefined more than 100 times
642        // in the middle of resolve. If we loop here more than 100 times,
643        // there is probably a bug.
644        guarantee((retry_count++ < 100),
645                  "Could not resolve to latest version of redefined method");
646        // method is redefined in the middle of resolve so re-try.
647        LinkResolver::resolve_invoke(info, receiver, pool,
648                                     two_byte_index(thread), bytecode, CHECK);
649      }
650    }
651  } // end JvmtiHideSingleStepping
652
653  // check if link resolution caused cpCache to be updated
654  if (already_resolved(thread)) return;
655
656  if (bytecode == Bytecodes::_invokeinterface) {
657
658    if (TraceItables && Verbose) {
659      ResourceMark rm(thread);
660      tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
661    }
662    if (info.resolved_method()->method_holder() ==
663                                            SystemDictionary::object_klass()) {
664      // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
665      // (see also cpCacheOop.cpp for details)
666      methodHandle rm = info.resolved_method();
667      assert(rm->is_final() || info.has_vtable_index(),
668             "should have been set already");
669      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
670    } else {
671      // Setup itable entry
672      int index = klassItable::compute_itable_index(info.resolved_method()());
673      cache_entry(thread)->set_interface_call(info.resolved_method(), index);
674    }
675  } else {
676    cache_entry(thread)->set_method(
677      bytecode,
678      info.resolved_method(),
679      info.vtable_index());
680  }
681IRT_END
682
683
684// First time execution:  Resolve symbols, create a permanent CallSiteImpl object.
685IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
686  ResourceMark rm(thread);
687
688  assert(EnableInvokeDynamic, "");
689
690  const Bytecodes::Code bytecode = Bytecodes::_invokedynamic;
691
692  methodHandle caller_method(thread, method(thread));
693
694  // first determine if there is a bootstrap method
695  {
696    KlassHandle caller_klass(thread, caller_method->method_holder());
697    Handle bootm = SystemDictionary::find_bootstrap_method(caller_klass, KlassHandle(), CHECK);
698    if (bootm.is_null()) {
699      // If there is no bootstrap method, throw IncompatibleClassChangeError.
700      // This is a valid generic error type for resolution (JLS 12.3.3).
701      char buf[200];
702      jio_snprintf(buf, sizeof(buf), "Class %s has not declared a bootstrap method for invokedynamic",
703                   (Klass::cast(caller_klass()))->external_name());
704      THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
705    }
706  }
707
708  constantPoolHandle pool(thread, caller_method->constants());
709  pool->set_invokedynamic();    // mark header to flag active call sites
710
711  int raw_index = four_byte_index(thread);
712  assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially");
713
714  // there are two CPC entries that are of interest:
715  int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index);
716  int main_index = pool->cache()->entry_at(site_index)->main_entry_index();
717  // and there is one CP entry, a NameAndType:
718  int nt_index = pool->map_instruction_operand_to_index(raw_index);
719
720  // first resolve the signature to a MH.invoke methodOop
721  if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
722    JvmtiHideSingleStepping jhss(thread);
723    CallInfo info;
724    LinkResolver::resolve_invoke(info, Handle(), pool,
725                                 raw_index, bytecode, CHECK);
726    // The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
727    // as a common reference point for all invokedynamic call sites with
728    // that exact call descriptor.  We will link it in the CP cache exactly
729    // as if it were an invokevirtual of MethodHandle.invoke.
730    pool->cache()->entry_at(main_index)->set_method(
731      bytecode,
732      info.resolved_method(),
733      info.vtable_index());
734    assert(pool->cache()->entry_at(main_index)->is_vfinal(), "f2 must be a methodOop");
735  }
736
737  // The method (f2 entry) of the main entry is the MH.invoke for the
738  // invokedynamic target call signature.
739  intptr_t f2_value = pool->cache()->entry_at(main_index)->f2();
740  methodHandle mh_invdyn(THREAD, (methodOop) f2_value);
741  assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
742         "correct result from LinkResolver::resolve_invokedynamic");
743
744  symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index));
745  Handle call_site
746    = SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
747                                               caller_method->method_idnum(),
748                                               caller_method->bci_from(bcp(thread)),
749                                               call_site_name,
750                                               mh_invdyn,
751                                               CHECK);
752
753  // In the secondary entry, the f1 field is the call site, and the f2 (index)
754  // field is some data about the invoke site.
755  int extra_data = 0;
756  pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
757}
758IRT_END
759
760
761// Called on first time execution, and also whenever the CallSite.target is null.
762// FIXME:  Do more of this in Java code.
763IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) {
764  methodHandle   mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site));
765  Handle         mh_type(thread,   mh_invdyn->method_handle_type());
766  objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type()));
767
768  // squish the arguments down to a single array
769  int nargs = mh_ptypes->length();
770  objArrayHandle arg_array;
771  {
772    objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK);
773    arg_array = objArrayHandle(thread, aaoop);
774  }
775  frame fr = thread->last_frame();
776  assert(fr.interpreter_frame_bcp() != NULL, "sanity");
777  int tos_offset = 0;
778  for (int i = nargs; --i >= 0; ) {
779    intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++);
780    oop ptype = mh_ptypes->obj_at(i);
781    oop arg = NULL;
782    if (!java_lang_Class::is_primitive(ptype)) {
783      arg = *(oop*) slot_addr;
784    } else {
785      BasicType bt = java_lang_Class::primitive_type(ptype);
786      assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code");
787      jvalue value;
788      Interpreter::get_jvalue_in_slot(slot_addr, bt, &value);
789      tos_offset += type2size[bt]-1;
790      arg = java_lang_boxing_object::create(bt, &value, CHECK);
791      // FIXME:  These boxing objects are not canonicalized under
792      // the Java autoboxing rules.  They should be...
793      // The best approach would be to push the arglist creation into Java.
794      // The JVM should use a lower-level interface to communicate argument lists.
795    }
796    arg_array->obj_at_put(i, arg);
797  }
798
799  // now find the bootstrap method
800  oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method();
801  assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM");
802
803  // return the bootstrap method and argument array via vm_result/_2
804  thread->set_vm_result(bootstrap_mh_oop);
805  thread->set_vm_result_2(arg_array());
806}
807IRT_END
808
809
810
811//------------------------------------------------------------------------------------------------------------------------
812// Miscellaneous
813
814
815#ifndef PRODUCT
816static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
817  if (TraceInvocationCounterOverflow) {
818    InvocationCounter* ic = m->invocation_counter();
819    InvocationCounter* bc = m->backedge_counter();
820    ResourceMark rm;
821    const char* msg =
822      branch_bcp == NULL
823      ? "comp-policy cntr ovfl @ %d in entry of "
824      : "comp-policy cntr ovfl @ %d in loop of ";
825    tty->print(msg, bci);
826    m->print_value();
827    tty->cr();
828    ic->print();
829    bc->print();
830    if (ProfileInterpreter) {
831      if (branch_bcp != NULL) {
832        methodDataOop mdo = m->method_data();
833        if (mdo != NULL) {
834          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
835          tty->print_cr("back branch count = %d", count);
836        }
837      }
838    }
839  }
840}
841
842static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
843  if (TraceOnStackReplacement) {
844    ResourceMark rm;
845    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
846    method->print_short_name(tty);
847    tty->print_cr(" at bci %d", bci);
848  }
849}
850#endif // !PRODUCT
851
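// Called when a method's invocation or backedge counter overflows.
// A NULL branch_bcp means the overflow happened on method entry: the
// compilation policy is notified and NULL is returned.  A non-NULL branch_bcp
// means the overflow happened on a backward branch: we look up (or request)
// an OSR nmethod for the loop and return it if one is available, otherwise NULL.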
852IRT_ENTRY(nmethod*,
853          InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp))
854  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
855  // flag, in case this method triggers classloading which will call into Java.
856  UnlockFlagSaver fs(thread);
857
858  frame fr = thread->last_frame();
859  assert(fr.is_interpreted_frame(), "must come from interpreter");
860  methodHandle method(thread, fr.interpreter_frame_method());
861  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
862  const int bci = method->bci_from(fr.interpreter_frame_bcp());
863  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
864
865  if (JvmtiExport::can_post_interpreter_events()) {
866    if (thread->is_interp_only_mode()) {
867      // If certain JVMTI events (e.g. frame pop event) are requested then the
868      // thread is forced to remain in interpreted code. This is
869      // implemented partly by a check in the run_compiled_code
870      // section of the interpreter whether we should skip running
871      // compiled code, and partly by skipping OSR compiles for
872      // interpreted-only threads.
873      if (branch_bcp != NULL) {
874        CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
875        return NULL;
876      }
877    }
878  }
879
880  if (branch_bcp == NULL) {
881    // When the code cache is full, compilation is switched off and UseCompiler
882    // is set to false.
883    if (!method->has_compiled_code() && UseCompiler) {
884      CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
885    } else {
886      // Force counter overflow on method entry, even if no compilation
887      // happened.  (The method_invocation_event call does this also.)
888      CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
889    }
890    // Compilation at an invocation overflow no longer retries the test for a
891    // compiled method. We always run the loser of the race interpreted,
892    // so return NULL.
893    return NULL;
894  } else {
895    // counter overflow in a loop => try to do on-stack-replacement
896    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
897    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
898    // when code cache is full, we should not compile any more...
899    if (osr_nm == NULL && UseCompiler) {
900      const int branch_bci = method->bci_from(branch_bcp);
901      CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
902      osr_nm = method->lookup_osr_nmethod_for(bci);
903    }
904    if (osr_nm == NULL) {
905      CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
906      return NULL;
907    } else {
908      // We may need to do on-stack replacement which requires that no
909      // monitors in the activation are biased because their
910      // BasicObjectLocks will need to migrate during OSR. Force
911      // unbiasing of all monitors in the activation now (even though
912      // the OSR nmethod might be invalidated) because we don't have a
913      // safepoint opportunity later once the migration begins.
914      if (UseBiasedLocking) {
915        ResourceMark rm;
916        GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
917        for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
918             kptr < fr.interpreter_frame_monitor_begin();
919             kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
920          if( kptr->obj() != NULL ) {
921            objects_to_revoke->append(Handle(THREAD, kptr->obj()));
922          }
923        }
924        BiasedLocking::revoke(objects_to_revoke);
925      }
926
927      return osr_nm;
928    }
929  }
930IRT_END
931
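// Convert a bytecode pointer into a data index (di) within the method's
// methodDataOop; returns 0 if the method has no profiling data.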
932IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))
933  assert(ProfileInterpreter, "must be profiling interpreter");
934  int bci = method->bci_from(cur_bcp);
935  methodDataOop mdo = method->method_data();
936  if (mdo == NULL)  return 0;
937  return mdo->bci_to_di(bci);
938IRT_END
939
940IRT_ENTRY(jint, InterpreterRuntime::profile_method(JavaThread* thread, address cur_bcp))
941  // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
942  // flag, in case this method triggers classloading which will call into Java.
943  UnlockFlagSaver fs(thread);
944
945  assert(ProfileInterpreter, "must be profiling interpreter");
946  frame fr = thread->last_frame();
947  assert(fr.is_interpreted_frame(), "must come from interpreter");
948  methodHandle method(thread, fr.interpreter_frame_method());
949  int bci = method->bci_from(cur_bcp);
950  methodOopDesc::build_interpreter_method_data(method, THREAD);
951  if (HAS_PENDING_EXCEPTION) {
952    assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
953    CLEAR_PENDING_EXCEPTION;
954    // and fall through...
955  }
956  methodDataOop mdo = method->method_data();
957  if (mdo == NULL)  return 0;
958  return mdo->bci_to_di(bci);
959IRT_END
960
961
962#ifdef ASSERT
963IRT_LEAF(void, InterpreterRuntime::verify_mdp(methodOopDesc* method, address bcp, address mdp))
964  assert(ProfileInterpreter, "must be profiling interpreter");
965
966  methodDataOop mdo = method->method_data();
967  assert(mdo != NULL, "must not be null");
968
969  int bci = method->bci_from(bcp);
970
971  address mdp2 = mdo->bci_to_dp(bci);
972  if (mdp != mdp2) {
973    ResourceMark rm;
974    ResetNoHandleMark rnm; // In a LEAF entry.
975    HandleMark hm;
976    tty->print_cr("FAILED verify : actual mdp %p   expected mdp %p @ bci %d", mdp, mdp2, bci);
977    int current_di = mdo->dp_to_di(mdp);
978    int expected_di  = mdo->dp_to_di(mdp2);
979    tty->print_cr("  actual di %d   expected di %d", current_di, expected_di);
980    int expected_approx_bci = mdo->data_at(expected_di)->bci();
981    int approx_bci = -1;
982    if (current_di >= 0) {
983      approx_bci = mdo->data_at(current_di)->bci();
984    }
985    tty->print_cr("  actual bci is %d  expected bci %d", approx_bci, expected_approx_bci);
986    mdo->print_on(tty);
987    method->print_codes();
988  }
989  assert(mdp == mdp2, "wrong mdp");
990IRT_END
991#endif // ASSERT
992
993IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci))
994  assert(ProfileInterpreter, "must be profiling interpreter");
995  ResourceMark rm(thread);
996  HandleMark hm(thread);
997  frame fr = thread->last_frame();
998  assert(fr.is_interpreted_frame(), "must come from interpreter");
999  methodDataHandle h_mdo(thread, fr.interpreter_frame_method()->method_data());
1000
1001  // Grab a lock to ensure atomic access to setting the return bci and
1002  // the displacement.  This can block and GC, invalidating all naked oops.
1003  MutexLocker ml(RetData_lock);
1004
1005  // ProfileData is essentially a wrapper around a derived oop, so we
1006  // need to take the lock before making any ProfileData structures.
1007  ProfileData* data = h_mdo->data_at(h_mdo->dp_to_di(fr.interpreter_frame_mdp()));
1008  RetData* rdata = data->as_RetData();
1009  address new_mdp = rdata->fixup_ret(return_bci, h_mdo);
1010  fr.interpreter_frame_set_mdp(new_mdp);
1011IRT_END
1012
1013
1014IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread))
1015  // We used to need an explicit preserve_arguments here for invoke bytecodes. However,
1016  // stack traversal automatically takes care of preserving arguments for invoke, so
1017  // this is no longer needed.
1018
1019  // IRT_END does an implicit safepoint check, hence we are guaranteed to block
1020  // if this is called during a safepoint
1021
1022  if (JvmtiExport::should_post_single_step()) {
1023    // We are called during regular safepoints and when the VM is
1024    // single stepping. If any thread is marked for single stepping,
1025    // then we may have JVMTI work to do.
1026    JvmtiExport::at_single_stepping_point(thread, method(thread), bcp(thread));
1027  }
1028IRT_END
1029
1030IRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread *thread, oopDesc* obj,
1031ConstantPoolCacheEntry *cp_entry))
1032
1033  // check the access_flags for the field in the klass
1034  instanceKlass* ik = instanceKlass::cast((klassOop)cp_entry->f1());
1035  typeArrayOop fields = ik->fields();
1036  int index = cp_entry->field_index();
1037  assert(index < fields->length(), "holder's field index is out of range");
1038  // bail out if field accesses are not watched
1039  if ((fields->ushort_at(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return;
1040
1041  switch(cp_entry->flag_state()) {
1042    case btos:    // fall through
1043    case ctos:    // fall through
1044    case stos:    // fall through
1045    case itos:    // fall through
1046    case ftos:    // fall through
1047    case ltos:    // fall through
1048    case dtos:    // fall through
1049    case atos: break;
1050    default: ShouldNotReachHere(); return;
1051  }
1052  bool is_static = (obj == NULL);
1053  HandleMark hm(thread);
1054
1055  Handle h_obj;
1056  if (!is_static) {
1057    // non-static field accessors have an object, but we need a handle
1058    h_obj = Handle(thread, obj);
1059  }
1060  instanceKlassHandle h_cp_entry_f1(thread, (klassOop)cp_entry->f1());
1061  jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2(), is_static);
1062  JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid);
1063IRT_END
1064
1065IRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread *thread,
1066  oopDesc* obj, ConstantPoolCacheEntry *cp_entry, jvalue *value))
1067
1068  klassOop k = (klassOop)cp_entry->f1();
1069
1070  // check the access_flags for the field in the klass
1071  instanceKlass* ik = instanceKlass::cast(k);
1072  typeArrayOop fields = ik->fields();
1073  int index = cp_entry->field_index();
1074  assert(index < fields->length(), "holder's field index is out of range");
1075  // bail out if field modifications are not watched
1076  if ((fields->ushort_at(index) & JVM_ACC_FIELD_MODIFICATION_WATCHED) == 0) return;
1077
1078  char sig_type = '\0';
1079
1080  switch(cp_entry->flag_state()) {
1081    case btos: sig_type = 'Z'; break;
1082    case ctos: sig_type = 'C'; break;
1083    case stos: sig_type = 'S'; break;
1084    case itos: sig_type = 'I'; break;
1085    case ftos: sig_type = 'F'; break;
1086    case atos: sig_type = 'L'; break;
1087    case ltos: sig_type = 'J'; break;
1088    case dtos: sig_type = 'D'; break;
1089    default:  ShouldNotReachHere(); return;
1090  }
1091  bool is_static = (obj == NULL);
1092
1093  HandleMark hm(thread);
1094  instanceKlassHandle h_klass(thread, k);
1095  jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2(), is_static);
1096  jvalue fvalue;
1097#ifdef _LP64
1098  fvalue = *value;
1099#else
1100  // Long/double values are stored unaligned and also noncontiguously with
1101  // tagged stacks.  We can't just do a simple assignment even in the non-
1102  // J/D cases because a C++ compiler is allowed to assume that a jvalue is
1103  // 8-byte aligned, and interpreter stack slots are only 4-byte aligned.
1104  // We assume that the two halves of longs/doubles are stored in interpreter
1105  // stack slots in platform-endian order.
1106  jlong_accessor u;
1107  jint* newval = (jint*)value;
1108  u.words[0] = newval[0];
1109  u.words[1] = newval[Interpreter::stackElementWords()]; // skip if tag
1110  fvalue.j = u.long_value;
1111#endif // _LP64
1112
1113  Handle h_obj;
1114  if (!is_static) {
1115    // non-static field accessors have an object, but we need a handle
1116    h_obj = Handle(thread, obj);
1117  }
1118
1119  JvmtiExport::post_raw_field_modification(thread, method(thread), bcp(thread), h_klass, h_obj,
1120                                           fid, sig_type, &fvalue);
1121IRT_END
1122
1123IRT_ENTRY(void, InterpreterRuntime::post_method_entry(JavaThread *thread))
1124  JvmtiExport::post_method_entry(thread, InterpreterRuntime::method(thread), InterpreterRuntime::last_frame(thread));
1125IRT_END
1126
1127
1128IRT_ENTRY(void, InterpreterRuntime::post_method_exit(JavaThread *thread))
1129  JvmtiExport::post_method_exit(thread, InterpreterRuntime::method(thread), InterpreterRuntime::last_frame(thread));
1130IRT_END
1131
1132IRT_LEAF(int, InterpreterRuntime::interpreter_contains(address pc))
1133{
1134  return (Interpreter::contains(pc) ? 1 : 0);
1135}
1136IRT_END
1137
1138
1139// Implementation of SignatureHandlerLibrary
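//
// The library caches generated native-call signature handlers in a BufferBlob
// and keys them by the signature fingerprint, so that methods with the same
// parameter layout share a single handler.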
1140
1141address SignatureHandlerLibrary::set_handler_blob() {
1142  BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
1143  if (handler_blob == NULL) {
1144    return NULL;
1145  }
1146  address handler = handler_blob->instructions_begin();
1147  _handler_blob = handler_blob;
1148  _handler = handler;
1149  return handler;
1150}
1151
1152void SignatureHandlerLibrary::initialize() {
1153  if (_fingerprints != NULL) {
1154    return;
1155  }
1156  if (set_handler_blob() == NULL) {
1157    vm_exit_out_of_memory(blob_size, "native signature handlers");
1158  }
1159
1160  BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
1161                                      SignatureHandlerLibrary::buffer_size);
1162  _buffer = bb->instructions_begin();
1163
1164  _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
1165  _handlers     = new(ResourceObj::C_HEAP)GrowableArray<address>(32, true);
1166}
1167
1168address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
1169  address handler   = _handler;
1170  int     code_size = buffer->pure_code_size();
1171  if (handler + code_size > _handler_blob->instructions_end()) {
1172    // get a new handler blob
1173    handler = set_handler_blob();
1174  }
1175  if (handler != NULL) {
1176    memcpy(handler, buffer->code_begin(), code_size);
1177    pd_set_handler(handler);
1178    ICache::invalidate_range(handler, code_size);
1179    _handler = handler + code_size;
1180  }
1181  return handler;
1182}
1183
1184void SignatureHandlerLibrary::add(methodHandle method) {
1185  if (method->signature_handler() == NULL) {
1186    // use slow signature handler if we can't do better
1187    int handler_index = -1;
1188    // check if we can use customized (fast) signature handler
1189    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
1190      // use customized signature handler
1191      MutexLocker mu(SignatureHandlerLibrary_lock);
1192      // make sure data structure is initialized
1193      initialize();
1194      // lookup method signature's fingerprint
1195      uint64_t fingerprint = Fingerprinter(method).fingerprint();
1196      handler_index = _fingerprints->find(fingerprint);
1197      // create handler if necessary
1198      if (handler_index < 0) {
1199        ResourceMark rm;
1200        ptrdiff_t align_offset = (address)
1201          round_to((intptr_t)_buffer, CodeEntryAlignment) - (address)_buffer;
1202        CodeBuffer buffer((address)(_buffer + align_offset),
1203                          SignatureHandlerLibrary::buffer_size - align_offset);
1204        InterpreterRuntime::SignatureHandlerGenerator(method, &buffer).generate(fingerprint);
1205        // copy into code heap
1206        address handler = set_handler(&buffer);
1207        if (handler == NULL) {
1208          // use slow signature handler
1209        } else {
1210          // debugging support
1211          if (PrintSignatureHandlers) {
1212            tty->cr();
1213            tty->print_cr("argument handler #%d for: %s %s (fingerprint = " UINT64_FORMAT ", %d bytes generated)",
1214                          _handlers->length(),
1215                          (method->is_static() ? "static" : "receiver"),
1216                          method->name_and_sig_as_C_string(),
1217                          fingerprint,
1218                          buffer.code_size());
1219            Disassembler::decode(handler, handler + buffer.code_size());
1220#ifndef PRODUCT
1221            tty->print_cr(" --- associated result handler ---");
1222            address rh_begin = Interpreter::result_handler(method()->result_type());
1223            address rh_end = rh_begin;
1224            while (*(int*)rh_end != 0) {
1225              rh_end += sizeof(int);
1226            }
1227            Disassembler::decode(rh_begin, rh_end);
1228#endif
1229          }
1230          // add handler to library
1231          _fingerprints->append(fingerprint);
1232          _handlers->append(handler);
1233          // set handler index
1234          assert(_fingerprints->length() == _handlers->length(), "sanity check");
1235          handler_index = _fingerprints->length() - 1;
1236        }
1237      }
1238    } else {
1239      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1240    }
1241    if (handler_index < 0) {
1242      // use generic signature handler
1243      method->set_signature_handler(Interpreter::slow_signature_handler());
1244    } else {
1245      // set handler
1246      method->set_signature_handler(_handlers->at(handler_index));
1247    }
1248  }
1249  assert(method->signature_handler() == Interpreter::slow_signature_handler() ||
1250         _handlers->find(method->signature_handler()) == _fingerprints->find(Fingerprinter(method).fingerprint()),
1251         "sanity check");
1252}
1253
1254
1255BufferBlob*              SignatureHandlerLibrary::_handler_blob = NULL;
1256address                  SignatureHandlerLibrary::_handler      = NULL;
1257GrowableArray<uint64_t>* SignatureHandlerLibrary::_fingerprints = NULL;
1258GrowableArray<address>*  SignatureHandlerLibrary::_handlers     = NULL;
1259address                  SignatureHandlerLibrary::_buffer       = NULL;
1260
1261
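// Prepare a native method for its first invocation from the interpreter:
// look up the native entry point if it has not been set yet and install a
// signature handler via SignatureHandlerLibrary::add().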
1262IRT_ENTRY(void, InterpreterRuntime::prepare_native_call(JavaThread* thread, methodOopDesc* method))
1263  methodHandle m(thread, method);
1264  assert(m->is_native(), "sanity check");
1265  // lookup native function entry point if it doesn't exist
1266  bool in_base_library;
1267  if (!m->has_native_function()) {
1268    NativeLookup::lookup(m, in_base_library, CHECK);
1269  }
1270  // make sure signature handler is installed
1271  SignatureHandlerLibrary::add(m);
1272  // The interpreter entry point checks the signature handler first,
1273  // before trying to fetch the native entry point and klass mirror.
1274  // We must set the signature handler last, so that multiple processors
1275  // preparing the same method will be sure to see non-null entry & mirror.
1276IRT_END
1277
1278#if defined(IA32) || defined(AMD64)
1279IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address))
1280  if (src_address == dest_address) {
1281    return;
1282  }
1283  ResetNoHandleMark rnm; // In a LEAF entry.
1284  HandleMark hm;
1285  ResourceMark rm;
1286  frame fr = thread->last_frame();
1287  assert(fr.is_interpreted_frame(), "");
1288  jint bci = fr.interpreter_frame_bci();
1289  methodHandle mh(thread, fr.interpreter_frame_method());
1290  Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
1291  ArgumentSizeComputer asc(invoke->signature());
1292  int size_of_arguments = (asc.size() + (invoke->is_invokestatic() ? 0 : 1)); // receiver
1293  Copy::conjoint_bytes(src_address, dest_address,
1294                       size_of_arguments * Interpreter::stackElementSize());
1295IRT_END
1296#endif
1297