deoptimization.cpp revision 1472:c18cbe5936b8
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_deoptimization.cpp.incl"

bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
  _return_type               = return_type;
  // PD (x86 only)
  _counter_temp              = 0;
  _initial_fp                = 0;
  _unpack_kind               = 0;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
}


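// Note: the UnrollBlock takes ownership of the frame_sizes and frame_pcs
// arrays passed to its constructor; they are freed here together with the
// register block allocated above.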
Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
}


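// Each register is given two intptr_t slots in _register_block (hence the
// reg_count * 2 allocation in the constructor), so, for example,
// value_addr_at(3) returns &_register_block[6].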
intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}


int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print("%d ", frame_sizes()[index]);
  }
  tty->cr();
}


// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread);
JRT_END


// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame.  This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));
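  // chunk->at(0) is now the youngest (innermost) scope of the deoptee and the
  // last element is the outermost scope of this compiled frame.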

#ifdef COMPILER2
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (DoEscapeAnalysis) {
    if (EliminateAllocations) {
      assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

      // The flag return_oop() indicates call sites which return an oop
      // in compiled code. Such sites include java method calls,
      // runtime calls (for example, used to allocate new objects/arrays
      // on slow code path) and any other calls generated in compiled code.
      // It is not guaranteed that we can get such information here only
      // by analyzing bytecode in deoptimized frames. This is why this flag
      // is set during method compilation (see Compile::Process_OopMap_Node()).
      bool save_oop_result = chunk->at(0)->scope()->return_oop();
      Handle return_value;
      if (save_oop_result) {
        // Reallocation may trigger GC. If deoptimization happened on return from
        // a call which returns an oop, we need to save it since it is not in the oopmap.
        oop result = deoptee.saved_oop_result(&map);
        assert(result == NULL || result->is_oop(), "must be oop");
        return_value = Handle(thread, result);
        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
        if (TraceDeoptimization) {
          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
        }
      }
      bool reallocated = false;
      if (objects != NULL) {
        JRT_BLOCK
          reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
        JRT_END
      }
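      // The JRT_BLOCK above allows a safepoint (and hence a GC) during
      // reallocation, which is why any oop result was captured in a Handle
      // beforehand.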
      if (reallocated) {
        reassign_fields(&deoptee, &map, objects);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
          print_objects(objects);
        }
#endif
      }
      if (save_oop_result) {
        // Restore result.
        deoptee.set_saved_oop_result(&map, return_value());
      }
    }
    if (EliminateLocks) {
#ifndef PRODUCT
      bool first = true;
#endif
      for (int i = 0; i < chunk->length(); i++) {
        compiledVFrame* cvf = chunk->at(i);
        assert (cvf->scope() != NULL,"expect only compiled java frames");
        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
        if (monitors->is_nonempty()) {
          relock_objects(monitors, thread);
#ifndef PRODUCT
          if (TraceDeoptimization) {
            ttyLocker ttyl;
            for (int j = 0; j < monitors->length(); j++) {
              MonitorInfo* mi = monitors->at(j);
              if (mi->eliminated()) {
                if (first) {
                  first = false;
                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                }
                tty->print_cr("     object <" INTPTR_FORMAT "> locked", mi->owner());
              }
            }
          }
#endif
        }
      }
    }
  }
#endif // COMPILER2
  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects.  If a safepoint occurs from here on
  // out the java state residing in the vframeArray will be missed.
  No_Safepoint_Verifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created, any deferred local writes
  // added by jvmti can be freed: the data now lives in the vframeArray.

  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }

  }

  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
  Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
#endif
  // This is a guarantee instead of an assert because if the vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in the tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
  int callee_parameters = 0;
  int callee_locals = 0;
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller).  Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
  // so things look a little strange in this loop.
  //
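  // For example (hypothetical), with 3 virtual frames the loop below fills:
  //   index 0 (youngest) -> frame_sizes/frame_pcs[2]
  //   index 1            -> frame_sizes/frame_pcs[1]
  //   index 2 (oldest)   -> frame_sizes/frame_pcs[0]
  // while frame_pcs[3] already holds the interpreter deopt entry stored above.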
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect, just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable.
    // The correct pc will be set when the skeleton frame is completely filled out.
    // The final pc we store in the loop is wrong and will be overwritten below.
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    HandleMark hm;
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke* invoke = Bytecode_invoke_at_check(method, array->element(0)->bci());
    return_type = (invoke != NULL) ? invoke->result_type(thread) : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames.
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame()) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }
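  // For example (hypothetical values): with callee_parameters == 2 and
  // callee_locals == 5, a compiled caller is extended by last_frame_adjust(0, 5)
  // because its parameter area cannot be reused, while an interpreted caller
  // already provides the 2 parameter slots and is extended by
  // last_frame_adjust(2, 5).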


  // If the sender is deoptimized then we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = deopt_sender.raw_pc();

  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type);
#if defined(IA32) || defined(AMD64)
  // We need a way to pass fp to the unpacking code so the skeletal frames
  // come out correct. This is only needed for x86 because of c2 using ebp
  // as an allocatable register. So this update is useless (and harmless)
  // on the other platforms. It would be nice to do this in a different
  // way but even the old style deoptimization had a problem with deriving
  // this value. NEEDS_CLEANUP
  // Note: now that c1 is using c2's deopt blob we must do this on all
  // x86 based platforms
  intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes());
  *fp_addr = array->sender().fp(); // was adapter_caller
#endif /* IA32 || AMD64 */

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to clean up deoptimization data structures in the normal case
// after unpacking to the stack and when a stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // The C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
        thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}


// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark; any ResourceObjs we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
  }
#endif

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode);

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (cur_code == Bytecodes::_invokevirtual ||
          cur_code == Bytecodes::_invokespecial ||
          cur_code == Bytecodes::_invokestatic  ||
          cur_code == Bytecodes::_invokeinterface) {
        Bytecode_invoke* invoke = Bytecode_invoke_at(mh, iframe->interpreter_frame_bci());
        symbolHandle signature(thread, invoke->signature());
        ArgumentSizeComputer asc(signature);
        cur_invoke_parameter_size = asc.size();
        if (cur_code != Bytecodes::_invokestatic) {
          // Add in receiver
          ++cur_invoke_parameter_size;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code bc = str.next();
        if (bc >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          switch (cur_code) {
            case Bytecodes::_invokevirtual:
            case Bytecodes::_invokespecial:
            case Bytecodes::_invokestatic:
            case Bytecodes::_invokeinterface:
            case Bytecodes::_athrow:
              break;
            default: {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
              // Need to subtract off the size of the result type of
              // the bytecode because this is not described in the
              // debug info but returned to the interpreter in the TOS
              // caching register
              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
              if (bytecode_result_type != T_ILLEGAL) {
                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
              }
              assert(top_frame_expression_stack_adjustment >= 0, "");
              try_next_mask = true;
              break;
            }
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr("  try_next_mask = %d", try_next_mask);
        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr("  exec_mode = %d", exec_mode);
        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
        tty->print_cr("  Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END


int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}


#ifdef COMPILER2
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
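  // Stash any pending exception: the allocations below use the CHECK_ macros
  // and need the exception machinery to themselves. The original exception,
  // if any, is reinstalled before returning.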
  Handle pending_exception(thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    oop obj = NULL;

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      obj = ik->allocate_instance(CHECK_(false));
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
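      // field_size() counts 32-bit scope-value words; for example (hypothetical),
      // a long[3] array is described by 6 words: 6 / type2size[T_LONG] == 3 elements.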
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, CHECK_(false));
    } else if (k->oop_is_objArray()) {
      objArrayKlass* ak = objArrayKlass::cast(k());
      obj = ak->allocate(sv->field_size(), CHECK_(false));
    }

    assert(obj != NULL, "allocation failed");
    assert(sv->value().is_null(), "redundant reallocation");
    sv->set_value(obj);
  }

  if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return true;
}

// This assumes that the fields are stored in ObjectValue in the same order
// they are yielded by do_nonstatic_fields.
class FieldReassigner: public FieldClosure {
  frame* _fr;
  RegisterMap* _reg_map;
  ObjectValue* _sv;
  instanceKlass* _ik;
  oop _obj;

  int _i;
public:
  FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
    _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}

  int i() const { return _i; }


  void do_field(fieldDescriptor* fd) {
    intptr_t val;
    StackValue* value =
      StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
    int offset = fd->offset();
    switch (fd->field_type()) {
    case T_OBJECT: case T_ARRAY:
      assert(value->type() == T_OBJECT, "Agreement.");
      _obj->obj_field_put(offset, value->get_obj()());
      break;

    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      _obj->long_field_put(offset, res);
      break;
    }
    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->int_field_put(offset, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->short_field_put(offset, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->bool_field_put(offset, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    _i++;
  }
};
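// FieldReassigner is driven by instanceKlass::do_nonstatic_fields() from
// reassign_fields() below; do_field() is called once per non-static field and
// consumes the ObjectValue's field values in the order they were recorded.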

// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      obj->long_at_put(index, res);
      break;
    }

    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->int_at_put(index, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->short_at_put(index, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->bool_at_put(index, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    index++;
  }
}


// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}


// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();
    assert(obj.not_null(), "reallocation was missed");

    if (k->oop_is_instance()) {
      instanceKlass* ik = instanceKlass::cast(k());
      FieldReassigner reassign(fr, reg_map, sv, obj());
      ik->do_nonstatic_fields(&reassign);
    } else if (k->oop_is_typeArray()) {
      typeArrayKlass* ak = typeArrayKlass::cast(k());
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->oop_is_objArray()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}


// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(mon_info->owner() != NULL, "reallocation was missed");
      Handle obj = Handle(mon_info->owner());
      markOop mark = obj->mark();
      if (UseBiasedLocking && mark->has_bias_pattern()) {
        // Newly allocated objects may have the mark set to anonymously biased.
        // Also, the deoptimized method may have called methods with synchronization
        // where the thread-local object is bias locked to the current thread.
        assert(mark->is_biased_anonymously() ||
               mark->biased_locker() == thread, "should be locked to current thread");
        // Reset mark word to unbiased prototype.
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        obj->set_mark(unbiased_prototype);
      }
      BasicLock* lock = mon_info->lock();
      ObjectSynchronizer::slow_enter(obj, lock, thread);
    }
    assert(mon_info->owner()->is_locked(), "object must be locked now");
  }
}


#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
    Handle obj = sv->value();

    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
    k->as_klassOop()->print_value();
    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    tty->cr();

    if (Verbose) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
    fr.print_on(tty);
    tty->print_cr("     Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print("       %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = Bytecodes::code_at(vf->method(), bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl).  We capture
  // the state of the deopt'ing frame's caller.  Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");
  Events::log("# vframes = %d", (intptr_t)chunk->length());

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
  }
#endif // PRODUCT

  return array;
}


static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(mon_info->owner()));
    }
  }
}


void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}


void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}


void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);

  EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());

  // Patch the nmethod so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  // Deoptimize only if the frame comes from compiled code.
  // Do not deoptimize a frame which has already been patched
  // during the execution of the loops below.
  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
    return;
  }
  ResourceMark rm;
  DeoptimizationMarker dm;
  if (UseBiasedLocking) {
    revoke_biases_of_monitors(thread, fr, map);
  }
  deoptimize_single_frame(thread, fr);

}


void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map);
}


// JVMTI PopFrame support
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END


#ifdef COMPILER2
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
  // In case of an unresolved klass entry, load the class.
  if (constant_pool->tag_at(index).is_unresolved_klass()) {
    klassOop tk = constant_pool->klass_at(index, CHECK);
    return;
  }

  if (!constant_pool->tag_at(index).is_symbol()) return;

  Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
  symbolHandle symbol (THREAD, constant_pool->symbol_at(index));

  // class name?
  if (symbol->byte_at(0) != '(') {
    Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
    return;
  }

  // then it must be a signature!
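  // For example (hypothetical), a signature such as "(Ljava/lang/String;)V"
  // makes the loop below resolve java.lang.String.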
  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
    if (ss.is_object()) {
      symbolOop s = ss.as_symbol(CHECK);
      symbolHandle class_name (THREAD, s);
      Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
    }
  }
}


void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
  EXCEPTION_MARK;
  load_class_by_index(constant_pool, index, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // Exception happened during classloading. We ignore the exception here, since it
    // is going to be rethrown: the current activation is going to be deoptimized and
    // the interpreter will re-execute the bytecode.
    CLEAR_PENDING_EXCEPTION;
  }
}

JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
  HandleMark hm;

  // uncommon_trap() is called at the beginning of the uncommon trap
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  // We need to update the map if we have biased locking.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame stub_frame = thread->last_frame();
  frame fr = stub_frame.sender(&reg_map);
  // Make sure the calling nmethod is not getting deoptimized and removed
  // before we are done with it.
  nmethodLocker nl(fr.pc());

  {
    ResourceMark rm;

    // Revoke biases of any monitors in the frame to ensure we can migrate them
    revoke_biases_of_monitors(thread, fr, &reg_map);

    DeoptReason reason = trap_request_reason(trap_request);
    DeoptAction action = trap_request_action(trap_request);
    jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
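    // trap_request packs the reason, action and index into one jint; the
    // trap_request_* accessors above decode it, and format_trap_request()
    // (used in the logging below) renders the same decoding in readable form.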
1145
1146    Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request);
1147    vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
1148    compiledVFrame* cvf = compiledVFrame::cast(vf);
1149
1150    nmethod* nm = cvf->code();
1151
1152    ScopeDesc*      trap_scope  = cvf->scope();
1153    methodHandle    trap_method = trap_scope->method();
1154    int             trap_bci    = trap_scope->bci();
1155    Bytecodes::Code trap_bc     = Bytecode_at(trap_method->bcp_from(trap_bci))->java_code();
1156
1157    // Record this event in the histogram.
1158    gather_statistics(reason, action, trap_bc);
1159
1160    // Ensure that we can record deopt. history:
1161    bool create_if_missing = ProfileTraps;
1162
1163    methodDataHandle trap_mdo
1164      (THREAD, get_method_data(thread, trap_method, create_if_missing));
1165
1166    // Print a bunch of diagnostics, if requested.
1167    if (TraceDeoptimization || LogCompilation) {
1168      ResourceMark rm;
1169      ttyLocker ttyl;
1170      char buf[100];
1171      if (xtty != NULL) {
1172        xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
1173                         os::current_thread_id(),
1174                         format_trap_request(buf, sizeof(buf), trap_request));
1175        nm->log_identity(xtty);
1176      }
1177      symbolHandle class_name;
1178      bool unresolved = false;
1179      if (unloaded_class_index >= 0) {
1180        constantPoolHandle constants (THREAD, trap_method->constants());
1181        if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1182          class_name = symbolHandle(THREAD,
1183            constants->klass_name_at(unloaded_class_index));
1184          unresolved = true;
1185          if (xtty != NULL)
1186            xtty->print(" unresolved='1'");
1187        } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1188          class_name = symbolHandle(THREAD,
1189            constants->symbol_at(unloaded_class_index));
1190        }
1191        if (xtty != NULL)
1192          xtty->name(class_name);
1193      }
1194      if (xtty != NULL && trap_mdo.not_null()) {
1195        // Dump the relevant MDO state.
1196        // This is the deopt count for the current reason, any previous
1197        // reasons or recompiles seen at this point.
1198        int dcnt = trap_mdo->trap_count(reason);
1199        if (dcnt != 0)
1200          xtty->print(" count='%d'", dcnt);
1201        ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1202        int dos = (pdata == NULL)? 0: pdata->trap_state();
1203        if (dos != 0) {
1204          xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1205          if (trap_state_is_recompiled(dos)) {
1206            int recnt2 = trap_mdo->overflow_recompile_count();
1207            if (recnt2 != 0)
1208              xtty->print(" recompiles2='%d'", recnt2);
1209          }
1210        }
1211      }
1212      if (xtty != NULL) {
1213        xtty->stamp();
1214        xtty->end_head();
1215      }
1216      if (TraceDeoptimization) {  // make noise on the tty
1217        tty->print("Uncommon trap occurred in");
1218        nm->method()->print_short_name(tty);
1219        tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
1220                   fr.pc(),
1221                   (int) os::current_thread_id(),
1222                   trap_reason_name(reason),
1223                   trap_action_name(action),
1224                   unloaded_class_index);
1225        if (class_name.not_null()) {
1226          tty->print(unresolved ? " unresolved class: " : " symbol: ");
1227          class_name->print_symbol_on(tty);
1228        }
1229        tty->cr();
1230      }
1231      if (xtty != NULL) {
1232        // Log the precise location of the trap.
1233        for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1234          xtty->begin_elem("jvms bci='%d'", sd->bci());
1235          xtty->method(sd->method());
1236          xtty->end_elem();
1237          if (sd->is_top())  break;
1238        }
1239        xtty->tail("uncommon_trap");
1240      }
1241    }
1242    // (End diagnostic printout.)

    // Load the class if necessary.
    if (unloaded_class_index >= 0) {
      constantPoolHandle constants(THREAD, trap_method->constants());
      load_class_by_index(constants, unloaded_class_index);
    }

    // Flush the nmethod if necessary and desirable.
    //
    // We need to avoid situations where we are re-flushing the nmethod
    // because of a hot deoptimization site.  Repeated flushes at the same
    // point need to be detected by the compiler and avoided.  If the compiler
    // cannot avoid them (or has a bug and "refuses" to avoid them), this
    // module must take measures to avoid an infinite cycle of recompilation
    // and deoptimization.  There are several such measures:
    //
    //   1. If a recompilation is ordered a second time at some site X
    //   and for the same reason R, the action is adjusted to 'reinterpret',
    //   to give the interpreter time to exercise the method more thoroughly.
    //   If this happens, the method's overflow_recompile_count is incremented.
    //
    //   2. If the compiler fails to reduce the deoptimization rate, then
    //   the method's overflow_recompile_count will begin to exceed the set
    //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
    //   is adjusted to 'make_not_compilable', and the method is abandoned
    //   to the interpreter.  This is a performance hit for hot methods,
    //   but is better than a disastrous infinite cycle of recompilations.
    //   (Actually, only the method containing the site X is abandoned.)
    //
    //   3. In parallel with the previous measures, if the total number of
    //   recompilations of a method exceeds the much larger set limit
    //   PerMethodRecompilationCutoff, the method is abandoned.
    //   This should only happen if the method is very large and has
    //   many "lukewarm" deoptimizations.  The code which enforces this
    //   limit is elsewhere (class nmethod, class methodOopDesc).
    //
    // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
    // to recompile at each bytecode independently of the per-BCI cutoff.
    //
    // The decision to update code is up to the compiler, and is encoded
    // in the Action_xxx code.  If the compiler requests Action_none,
    // no trap state is changed, no compiled code is changed, and the
    // computation suffers along in the interpreter.
    //
    // The other action codes specify various tactics for decompilation
    // and recompilation.  Action_maybe_recompile is the loosest, and
    // allows the compiled code to stay around until enough traps are seen,
    // and until the compiler gets around to recompiling the trapping method.
    //
    // The other actions cause immediate removal of the present code.
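    //
    // For example (illustrative numbers only): suppose a class_check trap
    // at one bci fires repeatedly.  The first recompile request proceeds
    // normally; a second request at the same site for the same reason is
    // downgraded to 'reinterpret' (measure 1) and bumps
    // overflow_recompile_count.  Once that count passes
    // PerBytecodeRecompilationCutoff (measure 2), the method is made
    // not-compilable and left to the interpreter.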

    bool update_trap_state = true;
    bool make_not_entrant = false;
    bool make_not_compilable = false;
    bool reset_counters = false;
    switch (action) {
    case Action_none:
      // Keep the old code.
      update_trap_state = false;
      break;
    case Action_maybe_recompile:
      // No need to invalidate the present code, but we can initiate another
      // compile: start the compiler without (necessarily) invalidating the
      // nmethod.  The system will tolerate the old code, but new code should
      // be generated when possible.
      break;
    case Action_reinterpret:
      // Go back into the interpreter for a while, and then consider
      // recompiling from scratch.
      make_not_entrant = true;
      // Reset the invocation counter for the outermost method.
      // This will allow the interpreter to exercise the bytecodes
      // for a while before recompiling.
      // By contrast, Action_make_not_entrant is immediate.
      //
      // Note that the compiler will track null_check, null_assert,
      // range_check, and class_check events and log them as if they
      // had been traps taken from compiled code.  This will update
      // the MDO trap history so that the next compilation will
      // properly detect hot trap sites.
      reset_counters = true;
      break;
    case Action_make_not_entrant:
      // Request immediate recompilation, and get rid of the old code.
      // Make the nmethod not entrant, so the next time it is called it gets
      // recompiled.  Unloaded classes are loaded now, so the recompile happens
      // before the next call; the same holds for uninitialized classes.  The
      // interpreter will link the missing class, if any.
      make_not_entrant = true;
      break;
    case Action_make_not_compilable:
      // Give up on compiling this method at all.
      make_not_entrant = true;
      make_not_compilable = true;
      break;
    default:
      ShouldNotReachHere();
    }
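    // A quick reference, summarizing the flags set by the switch above:
    // Action_none leaves everything alone; Action_maybe_recompile only
    // updates trap state; Action_reinterpret sets make_not_entrant and
    // reset_counters; Action_make_not_entrant sets make_not_entrant; and
    // Action_make_not_compilable sets both make_not_entrant and
    // make_not_compilable.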

    // Setting +ProfileTraps fixes the following, on all platforms:
    // 4852688: ProfileInterpreter is off by default for ia64.  The result is
    // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
    // recompile relies on a methodDataOop to record heroic opt failures.

    // Whether the interpreter is producing MDO data or not, we also need
    // to use the MDO to detect hot deoptimization points and control
    // aggressive optimization.
    bool inc_recompile_count = false;
    ProfileData* pdata = NULL;
    if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
      assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
      uint this_trap_count = 0;
      bool maybe_prior_trap = false;
      bool maybe_prior_recompile = false;
      pdata = query_update_method_data(trap_mdo, trap_bci, reason,
                                       // outputs:
                                       this_trap_count,
                                       maybe_prior_trap,
                                       maybe_prior_recompile);
      // Because the interpreter also counts null, div0, range, and class
      // checks, these traps from compiled code are double-counted.
      // This is harmless; it just means that the PerXTrapLimit values
      // are in effect a little smaller than they look.

      DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
      if (per_bc_reason != Reason_none) {
        // Now take action based on the partially known per-BCI history.
        if (maybe_prior_trap
            && this_trap_count >= (uint)PerBytecodeTrapLimit) {
          // If there are too many traps at this BCI, force a recompile.
          // This will allow the compiler to see the limit overflow, and
          // take corrective action, if possible.  The compiler generally
          // does not use the exact PerBytecodeTrapLimit value, but instead
          // changes its tactics if it sees any traps at all.  This provides
          // a little hysteresis, delaying a recompile until a trap happens
          // several times.
          //
          // Actually, since there is only one bit of counter per BCI,
          // the possible per-BCI counts are {0,1,(per-method count)}.
          // This produces accurate results if in fact there is only
          // one hot trap site, but begins to get fuzzy if there are
          // many sites.  For example, if there are ten sites each
          // trapping two or more times, they each get the blame for
          // all of their traps.
          make_not_entrant = true;
        }

        // Detect repeated recompilation at the same BCI, and enforce a limit.
        if (make_not_entrant && maybe_prior_recompile) {
          // More than one recompile at this point.
          inc_recompile_count = maybe_prior_trap;
        }
      } else {
        // For reasons which are not recorded per bytecode, we simply
        // force recompiles unconditionally.
        // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
        make_not_entrant = true;
      }

      // Go back to the compiler if there are too many traps in this method.
      if (this_trap_count >= (uint)PerMethodTrapLimit) {
        // If there are too many traps in this method, force a recompile.
        // This will allow the compiler to see the limit overflow, and
        // take corrective action, if possible.
        // (This condition is an unlikely backstop only, because the
        // PerBytecodeTrapLimit is more likely to take effect first,
        // if it is applicable.)
        make_not_entrant = true;
      }

      // Here's more hysteresis:  If there has been a recompile at
      // this trap point already, run the method in the interpreter
      // for a while to exercise it more thoroughly.
      if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
        reset_counters = true;
      }

    }

    // Take requested actions on the method:

    // Recompile
    if (make_not_entrant) {
      if (!nm->make_not_entrant()) {
        return; // the call did not change nmethod's state
      }

      if (pdata != NULL) {
        // Record the recompilation event, if any.
        int tstate0 = pdata->trap_state();
        int tstate1 = trap_state_set_recompiled(tstate0, true);
        if (tstate1 != tstate0)
          pdata->set_trap_state(tstate1);
      }
    }

    if (inc_recompile_count) {
      trap_mdo->inc_overflow_recompile_count();
      if ((uint)trap_mdo->overflow_recompile_count() >
          (uint)PerBytecodeRecompilationCutoff) {
        // Give up on the method containing the bad BCI.
        if (trap_method() == nm->method()) {
          make_not_compilable = true;
        } else {
          trap_method->set_not_compilable();
          // But give grace to the enclosing nm->method().
        }
      }
    }

    // Reset invocation counters
    if (reset_counters) {
      if (nm->is_osr_method())
        reset_invocation_counter(trap_scope, CompileThreshold);
      else
        reset_invocation_counter(trap_scope);
    }

    // Give up compiling
    if (make_not_compilable && !nm->method()->is_not_compilable()) {
      assert(make_not_entrant, "consistent");
      nm->method()->set_not_compilable();
    }

  } // Free marked resources

}
JRT_END

methodDataOop
Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
                                bool create_if_missing) {
  Thread* THREAD = thread;
  methodDataOop mdo = m()->method_data();
  if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
    // Build an MDO.  Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    methodOopDesc::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m()->method_data();
  }
  return mdo;
}

ProfileData*
Deoptimization::query_update_method_data(methodDataHandle trap_mdo,
                                         int trap_bci,
                                         Deoptimization::DeoptReason reason,
                                         // outputs:
                                         uint& ret_this_trap_count,
                                         bool& ret_maybe_prior_trap,
                                         bool& ret_maybe_prior_recompile) {
  uint prior_trap_count = trap_mdo->trap_count(reason);
  uint this_trap_count  = trap_mdo->inc_trap_count(reason);

  // If the runtime cannot find a place to store trap history,
  // it is estimated based on the general condition of the method.
  // If the method has ever been recompiled, or has ever incurred
  // a trap with the present reason, then this BCI is assumed
  // (pessimistically) to be the culprit.
  bool maybe_prior_trap      = (prior_trap_count != 0);
  bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
  ProfileData* pdata = NULL;

  // For reasons which are recorded per bytecode, we check per-BCI data.
  DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
  if (per_bc_reason != Reason_none) {
    // Find the profile data for this BCI.  If there isn't one,
    // try to allocate one from the MDO's set of spares.
    // This will let us detect a repeated trap at this point.
    pdata = trap_mdo->allocate_bci_to_data(trap_bci);

    if (pdata != NULL) {
      // Query the trap state of this profile datum.
      int tstate0 = pdata->trap_state();
      if (!trap_state_has_reason(tstate0, per_bc_reason))
        maybe_prior_trap = false;
      if (!trap_state_is_recompiled(tstate0))
        maybe_prior_recompile = false;

      // Update the trap state of this profile datum.
      int tstate1 = tstate0;
      // Record the reason.
      tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
      // Store the updated state on the MDO, for next time.
      if (tstate1 != tstate0)
        pdata->set_trap_state(tstate1);
    } else {
      if (LogCompilation && xtty != NULL) {
        ttyLocker ttyl;
        // Missing MDP?  Leave a small complaint in the log.
        xtty->elem("missing_mdp bci='%d'", trap_bci);
      }
    }
  }

  // Return results:
  ret_this_trap_count = this_trap_count;
  ret_maybe_prior_trap = maybe_prior_trap;
  ret_maybe_prior_recompile = maybe_prior_recompile;
  return pdata;
}

void
Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  ResourceMark rm;
  // Ignored outputs:
  uint ignore_this_trap_count;
  bool ignore_maybe_prior_trap;
  bool ignore_maybe_prior_recompile;
  query_update_method_data(trap_mdo, trap_bci,
                           (DeoptReason)reason,
                           ignore_this_trap_count,
                           ignore_maybe_prior_trap,
                           ignore_maybe_prior_recompile);
}

void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
  ScopeDesc* sd = trap_scope;
  for (; !sd->is_top(); sd = sd->sender()) {
    // Reset invocation counters of inlined methods, since they can
    // trigger compilations also.
    sd->method()->invocation_counter()->reset();
  }
  InvocationCounter* c = sd->method()->invocation_counter();
  if (top_count != _no_count) {
    // It was an OSR method, so bump the count higher.
    c->set(c->state(), top_count);
  } else {
    c->reset();
  }
  sd->method()->backedge_counter()->reset();
}

Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {

  // Still in Java; no safepoints have happened yet.
  {
    // This call enters the VM and may safepoint.
    uncommon_trap_inner(thread, trap_request);
  }
  return fetch_unroll_info_helper(thread);
}

// Local derived constants.
// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
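// Illustrative layout (the exact width comes from DataLayout::trap_bits and
// is not assumed here): if trap_mask were 0xFF, then DS_REASON_MASK would be
// 0x7F (the low bits holding a DeoptReason code, with the all-ones value
// meaning Reason_many) and DS_RECOMPILE_BIT would be 0x80 (a single flag
// recording that a recompile has already been requested at this BCI).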

//---------------------------trap_state_reason---------------------------------
Deoptimization::DeoptReason
Deoptimization::trap_state_reason(int trap_state) {
  // This assert provides the link between the width of DataLayout::trap_bits
  // and the encoding of "recorded" reasons.  It ensures there are enough
  // bits to store all needed reasons in the per-BCI MDO profile.
  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return Reason_many;
  } else {
    assert((int)Reason_none == 0, "state=0 => Reason_none");
    return (DeoptReason)trap_state;
  }
}
//-------------------------trap_state_has_reason-------------------------------
int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return -1;  // true, unspecifically (bottom of state lattice)
  } else if (trap_state == reason) {
    return 1;   // true, definitely
  } else if (trap_state == 0) {
    return 0;   // false, definitely (top of state lattice)
  } else {
    return 0;   // false, definitely
  }
}
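// Note on the tri-state result above: callers such as
// query_update_method_data test it as a boolean, so both 1 ("definitely")
// and -1 ("unspecifically", from the Reason_many bottom state) count as true.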
//-------------------------trap_state_add_reason-------------------------------
int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
  assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  trap_state -= recompile_bit;
  if (trap_state == DS_REASON_MASK) {
    return trap_state + recompile_bit;      // already at state lattice bottom
  } else if (trap_state == reason) {
    return trap_state + recompile_bit;      // the condition is already true
  } else if (trap_state == 0) {
    return reason + recompile_bit;          // no condition has yet been true
  } else {
    return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
  }
}
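// A worked example of the lattice transitions (reasons chosen only for
// illustration): starting from state 0 ("no traps seen"), adding null_check
// yields the null_check code; adding null_check again is a no-op; adding
// range_check to that state falls to DS_REASON_MASK, i.e. Reason_many
// ("several distinct reasons seen").  The recompile bit is carried along
// unchanged by all of these transitions.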
//-----------------------trap_state_is_recompiled------------------------------
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return (trap_state & DS_RECOMPILE_BIT) != 0;
}
//-----------------------trap_state_set_recompiled-----------------------------
int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
  if (z)  return trap_state |  DS_RECOMPILE_BIT;
  else    return trap_state & ~DS_RECOMPILE_BIT;
}
//---------------------------format_trap_state---------------------------------
// This is used for debugging and diagnostics, including hotspot.log output.
const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  DeoptReason reason      = trap_state_reason(trap_state);
  bool        recomp_flag = trap_state_is_recompiled(trap_state);
  // Re-encode the state from its decoded components.
  int decoded_state = 0;
  if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
    decoded_state = trap_state_add_reason(decoded_state, reason);
  if (recomp_flag)
    decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
  // If the state re-encodes properly, format it symbolically.
  // Because this routine is used for debugging and diagnostics,
  // be robust even if the state is a strange value.
  size_t len;
  if (decoded_state != trap_state) {
    // Random buggy state that doesn't decode??
    len = jio_snprintf(buf, buflen, "#%d", trap_state);
  } else {
    len = jio_snprintf(buf, buflen, "%s%s",
                       trap_reason_name(reason),
                       recomp_flag ? " recompiled" : "");
  }
  if (len >= buflen)
    buf[buflen-1] = '\0';
  return buf;
}
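// Sample outputs (derived from the format strings above): a state holding
// just the class_check reason prints as "class_check"; with the recompile
// bit set it prints as "class_check recompiled"; a state that does not
// round-trip through decode/re-encode prints as a raw number, e.g. "#42".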


//--------------------------------statics--------------------------------------
Deoptimization::DeoptAction Deoptimization::_unloaded_action
  = Deoptimization::Action_reinterpret;
const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
  // Note:  Keep this in sync with enum DeoptReason.
  "none",
  "null_check",
  "null_assert",
  "range_check",
  "class_check",
  "array_check",
  "intrinsic",
  "bimorphic",
  "unloaded",
  "uninitialized",
  "unreached",
  "unhandled",
  "constraint",
  "div0_check",
  "age",
  "predicate"
};
const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
  // Note:  Keep this in sync with enum DeoptAction.
  "none",
  "maybe_recompile",
  "reinterpret",
  "make_not_entrant",
  "make_not_compilable"
};

const char* Deoptimization::trap_reason_name(int reason) {
  if (reason == Reason_many)  return "many";
  if ((uint)reason < Reason_LIMIT)
    return _trap_reason_name[reason];
  // Fallback for out-of-range values; note the static buffer is shared.
  static char buf[20];
  sprintf(buf, "reason%d", reason);
  return buf;
}
const char* Deoptimization::trap_action_name(int action) {
  if ((uint)action < Action_LIMIT)
    return _trap_action_name[action];
  static char buf[20];
  sprintf(buf, "action%d", action);
  return buf;
}

// This is used for debugging and diagnostics, including hotspot.log output.
const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
                                                int trap_request) {
  jint unloaded_class_index = trap_request_index(trap_request);
  const char* reason = trap_reason_name(trap_request_reason(trap_request));
  const char* action = trap_action_name(trap_request_action(trap_request));
  size_t len;
  if (unloaded_class_index < 0) {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
                       reason, action);
  } else {
    len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
                       reason, action, unloaded_class_index);
  }
  if (len >= buflen)
    buf[buflen-1] = '\0';
  return buf;
}
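// Sample output (derived from the format strings above, with an
// illustrative index):
//   reason='unloaded' action='reinterpret' index='13'
// The index attribute is the constant-pool index of the unloaded class and
// appears only when it is non-negative.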

juint Deoptimization::_deoptimization_hist
        [Deoptimization::Reason_LIMIT]
        [1 + Deoptimization::Action_LIMIT]
        [Deoptimization::BC_CASE_LIMIT]
  = {0};

enum {
  LSB_BITS = 8,
  LSB_MASK = right_n_bits(LSB_BITS)
};
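
// Each histogram cell packs two values into one juint: the low LSB_BITS hold
// the bytecode that the cell is devoted to, and the remaining high bits hold
// the event count.  For example (illustrative values), a cell recording 5
// traps at Bytecodes::_checkcast (opcode 0xc0) would contain
// (5 << LSB_BITS) | 0xc0.  A minimal decoding sketch:
//   Bytecodes::Code bc   = (Bytecodes::Code)(counter & LSB_MASK);
//   juint           hits = counter >> LSB_BITS;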

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  assert(action >= 0 && action < Action_LIMIT, "oob");
  _deoptimization_hist[Reason_none][0][0] += 1;  // total
  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  juint* cases = _deoptimization_hist[reason][1+action];
  juint* bc_counter_addr = NULL;
  juint  bc_counter      = 0;
  // Look for an unused counter, or an exact match to this BC.
  if (bc != Bytecodes::_illegal) {
    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
      juint* counter_addr = &cases[bc_case];
      juint  counter = *counter_addr;
      if ((counter == 0 && bc_counter_addr == NULL)
          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
        // This counter is either free or is already devoted to this BC.
        bc_counter_addr = counter_addr;
        bc_counter = counter | bc;
      }
    }
  }
  if (bc_counter_addr == NULL) {
    // Overflow, or no given bytecode.
    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
  }
  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
}

jint Deoptimization::total_deoptimization_count() {
  return _deoptimization_hist[Reason_none][0][0];
}

jint Deoptimization::deoptimization_count(DeoptReason reason) {
  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  return _deoptimization_hist[reason][0][0];
}

void Deoptimization::print_statistics() {
  juint total = total_deoptimization_count();
  juint account = total;
  if (total != 0) {
    ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
    tty->print_cr("Deoptimization traps recorded:");
    #define PRINT_STAT_LINE(name, r) \
      tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
    PRINT_STAT_LINE("total", total);
    // For each non-zero entry in the histogram, print the reason,
    // the action, and (if specifically known) the type of bytecode.
    for (int reason = 0; reason < Reason_LIMIT; reason++) {
      for (int action = 0; action < Action_LIMIT; action++) {
        juint* cases = _deoptimization_hist[reason][1+action];
        for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
          juint counter = cases[bc_case];
          if (counter != 0) {
            char name[1*K];
            Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
            // The last slot is the overflow case; its bytecode bits were
            // cleared when it overflowed, so report it as "other".
            if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)
              bc = Bytecodes::_illegal;
            sprintf(name, "%s/%s/%s",
                    trap_reason_name(reason),
                    trap_action_name(action),
                    Bytecodes::is_defined(bc) ? Bytecodes::name(bc) : "other");
            juint r = counter >> LSB_BITS;
            tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
            account -= r;
          }
        }
      }
    }
    if (account != 0) {
      PRINT_STAT_LINE("unaccounted", account);
    }
    #undef PRINT_STAT_LINE
    if (xtty != NULL)  xtty->tail("statistics");
  }
}
#else // COMPILER2


// Stubs for a C1-only system.
bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  return false;
}

const char* Deoptimization::trap_reason_name(int reason) {
  return "unknown";
}

void Deoptimization::print_statistics() {
  // no output
}

void
Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  // no update
}

int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  return 0;
}

void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
                                       Bytecodes::Code bc) {
  // no update
}

const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
                                              int trap_state) {
  jio_snprintf(buf, buflen, "#%d", trap_state);
  return buf;
}

#endif // COMPILER2