deoptimization.cpp revision 9287:40bd4478a362
1/*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/systemDictionary.hpp"
27#include "code/codeCache.hpp"
28#include "code/debugInfoRec.hpp"
29#include "code/nmethod.hpp"
30#include "code/pcDesc.hpp"
31#include "code/scopeDesc.hpp"
32#include "interpreter/bytecode.hpp"
33#include "interpreter/interpreter.hpp"
34#include "interpreter/oopMapCache.hpp"
35#include "memory/allocation.inline.hpp"
36#include "memory/oopFactory.hpp"
37#include "memory/resourceArea.hpp"
38#include "oops/method.hpp"
39#include "oops/oop.inline.hpp"
40#include "oops/fieldStreams.hpp"
41#include "oops/verifyOopClosure.hpp"
42#include "prims/jvmtiThreadState.hpp"
43#include "runtime/biasedLocking.hpp"
44#include "runtime/compilationPolicy.hpp"
45#include "runtime/deoptimization.hpp"
46#include "runtime/interfaceSupport.hpp"
47#include "runtime/sharedRuntime.hpp"
48#include "runtime/signature.hpp"
49#include "runtime/stubRoutines.hpp"
50#include "runtime/thread.hpp"
51#include "runtime/vframe.hpp"
52#include "runtime/vframeArray.hpp"
53#include "runtime/vframe_hp.hpp"
54#include "utilities/events.hpp"
55#include "utilities/xmlstream.hpp"
56
57#if INCLUDE_JVMCI
58#include "jvmci/jvmciRuntime.hpp"
59#include "jvmci/jvmciJavaClasses.hpp"
60#endif
61
62
63bool DeoptimizationMarker::_is_active = false;
64
65Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
66                                         int  caller_adjustment,
67                                         int  caller_actual_parameters,
68                                         int  number_of_frames,
69                                         intptr_t* frame_sizes,
70                                         address* frame_pcs,
71                                         BasicType return_type) {
72  _size_of_deoptimized_frame = size_of_deoptimized_frame;
73  _caller_adjustment         = caller_adjustment;
74  _caller_actual_parameters  = caller_actual_parameters;
75  _number_of_frames          = number_of_frames;
76  _frame_sizes               = frame_sizes;
77  _frame_pcs                 = frame_pcs;
78  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
79  _return_type               = return_type;
80  _initial_info              = 0;
81  // PD (x86 only)
82  _counter_temp              = 0;
83  _unpack_kind               = 0;
84  _sender_sp_temp            = 0;
85
86  _total_frame_sizes         = size_of_frames();
87}
88
89
90Deoptimization::UnrollBlock::~UnrollBlock() {
91  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
92  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
93  FREE_C_HEAP_ARRAY(intptr_t, _register_block);
94}
95
96
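// Note: the register block allocated in the constructor holds two adjacent slots per
// register (hence the reg_count * 2 sizing); value_addr_at() returns the address of the
// first slot for the given register.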
97intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
98  assert(register_number < RegisterMap::reg_count, "checking register number");
99  return &_register_block[register_number * 2];
100}
101
102
103
104int Deoptimization::UnrollBlock::size_of_frames() const {
105  // Account first for the adjustment of the initial frame
106  int result = _caller_adjustment;
107  for (int index = 0; index < number_of_frames(); index++) {
108    result += frame_sizes()[index];
109  }
110  return result;
111}
112
113
114void Deoptimization::UnrollBlock::print() {
115  ttyLocker ttyl;
116  tty->print_cr("UnrollBlock");
117  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
118  tty->print(   "  frame_sizes: ");
119  for (int index = 0; index < number_of_frames(); index++) {
120    tty->print(INTX_FORMAT " ", frame_sizes()[index]);
121  }
122  tty->cr();
123}
124
125
126// In order to make fetch_unroll_info work properly with escape
127// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
128// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
129// of previously eliminated objects occurs in realloc_objects, which is
130// called from the method fetch_unroll_info_helper below.
131JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
132  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
133  // but makes the entry a little slower. There is, however, a little dance we have to
134  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
135
136  // fetch_unroll_info() is called at the beginning of the deoptimization
137  // handler. Note this fact before we start generating temporary frames
138  // that can confuse an asynchronous stack walker. This counter is
139  // decremented at the end of unpack_frames().
140  if (TraceDeoptimization) {
141    tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread));
142  }
143  thread->inc_in_deopt_handler();
144
145  return fetch_unroll_info_helper(thread);
146JRT_END
147
148
149// This is factored out since it is called both from a JRT_LEAF (deoptimization) and from a JRT_ENTRY (uncommon_trap)
150Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {
151
152  // Note: there is a safepoint safety issue here. No matter whether we enter
153  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
154  // the vframeArray is created.
155  //
156
157  // Allocate our special deoptimization ResourceMark
158  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
159  assert(thread->deopt_mark() == NULL, "Pending deopt!");
160  thread->set_deopt_mark(dmark);
161
162  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
163  RegisterMap map(thread, true);
164  RegisterMap dummy_map(thread, false);
165  // Now get the deoptee with a valid map
166  frame deoptee = stub_frame.sender(&map);
167  // Set the deoptee nmethod
168  assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
169  thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());
170  bool skip_internal = thread->deopt_nmethod() != NULL && !thread->deopt_nmethod()->compiler()->is_jvmci();
171
172  if (VerifyStack) {
173    thread->validate_frame_layout();
174  }
175
176  // Create a growable array of VFrames where each VFrame represents an inlined
177  // Java frame.  This storage is allocated with the usual system arena.
178  assert(deoptee.is_compiled_frame(), "Wrong frame type");
179  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
180  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
181  while (!vf->is_top()) {
182    assert(vf->is_compiled_frame(), "Wrong frame type");
183    chunk->push(compiledVFrame::cast(vf));
184    vf = vf->sender();
185  }
186  assert(vf->is_compiled_frame(), "Wrong frame type");
187  chunk->push(compiledVFrame::cast(vf));
188
189  bool realloc_failures = false;
190
191#if defined(COMPILER2) || INCLUDE_JVMCI
192  // Reallocate the non-escaping objects and restore their fields. Then
193  // relock objects if synchronization on them was eliminated.
194#ifndef INCLUDE_JVMCI
195  if (DoEscapeAnalysis || EliminateNestedLocks) {
196    if (EliminateAllocations) {
197#endif // INCLUDE_JVMCI
198      assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
199      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
200
201      // The flag return_oop() indicates call sites which return an oop
202      // in compiled code. Such sites include Java method calls,
203      // runtime calls (for example, used to allocate new objects/arrays
204      // on slow code path) and any other calls generated in compiled code.
205      // It is not guaranteed that we can get such information here only
206      // by analyzing bytecode in deoptimized frames. This is why this flag
207      // is set during method compilation (see Compile::Process_OopMap_Node()).
208      // If the previous frame was popped, we don't have a result.
209      bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution();
210      Handle return_value;
211      if (save_oop_result) {
212        // Reallocation may trigger GC. If deoptimization happened on return from a
213        // call which returns an oop, we need to save it since it is not in the oopmap.
214        oop result = deoptee.saved_oop_result(&map);
215        assert(result == NULL || result->is_oop(), "must be oop");
216        return_value = Handle(thread, result);
217        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
218        if (TraceDeoptimization) {
219          ttyLocker ttyl;
220          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
221        }
222      }
223      if (objects != NULL) {
224        JRT_BLOCK
225          realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
226        JRT_END
227        reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
228#ifndef PRODUCT
229        if (TraceDeoptimization) {
230          ttyLocker ttyl;
231          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
232          print_objects(objects, realloc_failures);
233        }
234#endif
235      }
236      if (save_oop_result) {
237        // Restore result.
238        deoptee.set_saved_oop_result(&map, return_value());
239      }
240#ifndef INCLUDE_JVMCI
241    }
242    if (EliminateLocks) {
243#endif // INCLUDE_JVMCI
244#ifndef PRODUCT
245      bool first = true;
246#endif
247      for (int i = 0; i < chunk->length(); i++) {
248        compiledVFrame* cvf = chunk->at(i);
249        assert (cvf->scope() != NULL,"expect only compiled java frames");
250        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
251        if (monitors->is_nonempty()) {
252          relock_objects(monitors, thread, realloc_failures);
253#ifndef PRODUCT
254          if (PrintDeoptimizationDetails) {
255            ttyLocker ttyl;
256            for (int j = 0; j < monitors->length(); j++) {
257              MonitorInfo* mi = monitors->at(j);
258              if (mi->eliminated()) {
259                if (first) {
260                  first = false;
261                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
262                }
263                if (mi->owner_is_scalar_replaced()) {
264                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
265                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
266                } else {
267                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
268                }
269              }
270            }
271          }
272#endif // !PRODUCT
273        }
274      }
275#ifndef INCLUDE_JVMCI
276    }
277  }
278#endif // INCLUDE_JVMCI
279#endif // COMPILER2 || INCLUDE_JVMCI
280
281  // Ensure that no safepoint is taken after pointers have been stored
282  // in fields of rematerialized objects.  If a safepoint occurs from here on
283  // out the java state residing in the vframeArray will be missed.
284  No_Safepoint_Verifier no_safepoint;
285
286  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
287#if defined(COMPILER2) || INCLUDE_JVMCI
288  if (realloc_failures) {
289    pop_frames_failed_reallocs(thread, array);
290  }
291#endif
292
293  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
294  thread->set_vframe_array_head(array);
295
296  // Now that the vframeArray has been created, if we have any deferred local writes
297  // added by jvmti then we can free up that structure, as the data is now in the
298  // vframeArray.
299
300  if (thread->deferred_locals() != NULL) {
301    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
302    int i = 0;
303    do {
304      // Because of inlining we could have multiple vframes for a single frame
305      // and several of the vframes could have deferred writes. Find them all.
306      if (list->at(i)->id() == array->original().id()) {
307        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
308        list->remove_at(i);
309        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
310        delete dlv;
311      } else {
312        i++;
313      }
314    } while ( i < list->length() );
315    if (list->length() == 0) {
316      thread->set_deferred_locals(NULL);
317      // free the list and elements back to C heap.
318      delete list;
319    }
320
321  }
322
323#ifndef SHARK
324  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
325  CodeBlob* cb = stub_frame.cb();
326  // Verify we have the right vframeArray
327  assert(cb->frame_size() >= 0, "Unexpected frame size");
328  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
329
330  // If the deopt call site is a MethodHandle invoke call site we have
331  // to adjust the unpack_sp.
332  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
333  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
334    unpack_sp = deoptee.unextended_sp();
335
336#ifdef ASSERT
337  assert(cb->is_deoptimization_stub() ||
338         cb->is_uncommon_trap_stub() ||
339         strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
340         strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
341         "unexpected code blob: %s", cb->name());
342#endif
343#else
344  intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
345#endif // !SHARK
346
347  // This is a guarantee instead of an assert because if the vframe doesn't match
348  // we will unpack the wrong deoptimized frame and wind up in strange places
349  // where it will be very difficult to figure out what went wrong. Better
350  // to die an early death here than some very obscure death later when the
351  // trail is cold.
352  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
353  // in that it will fail to detect a problem when there is one. This needs
354  // more work in tiger timeframe.
355  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
356
357  int number_of_frames = array->frames();
358
359  // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
360  // virtual activation, which is the reverse of the elements in the vframes array.
361  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
362  // +1 because we always have an interpreter return address for the final slot.
363  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
364  int popframe_extra_args = 0;
365  // Create an interpreter return address for the stub to use as its return
366  // address so the skeletal frames are perfectly walkable
367  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
368
369  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
370  // activation be put back on the expression stack of the caller for reexecution
371  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
372    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
373  }
374
375  // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
376  // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
377  // than simply use array->sender.pc(). This requires us to walk the current set of frames.
378  //
379  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
380  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
381
382  // It's possible that the number of parameters at the call site is
383  // different from the number of arguments in the callee when method
384  // handles are used.  If the caller is interpreted, get the real
385  // value so that the proper amount of space can be added to its
386  // frame.
387  bool caller_was_method_handle = false;
388  if (deopt_sender.is_interpreted_frame()) {
389    methodHandle method = deopt_sender.interpreter_frame_method();
390    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
391    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
392      // Method handle invokes may involve fairly arbitrary chains of
393      // calls so it's impossible to know how much actual space the
394      // caller has for locals.
395      caller_was_method_handle = true;
396    }
397  }
398
399  //
400  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
401  // frame_sizes/frame_pcs[1] next oldest frame (int)
402  // frame_sizes/frame_pcs[n] youngest frame (int)
403  //
404  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
405  // owns the space for the return address to its caller).  Confusing, ain't it?
406  //
407  // The vframe array can address vframes with indices running from
408  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
409  // When we create the skeletal frames we need the oldest frame to be in the zero slot
410  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
411  // which is why things look a little strange in this loop.
412  //
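  // For example, with three virtual frames the loop below fills slot number_of_frames - 1
  // from array->element(0) (the youngest vframe) and slot 0 from the oldest element.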
413  int callee_parameters = 0;
414  int callee_locals = 0;
415  for (int index = 0; index < array->frames(); index++ ) {
416    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
417    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
418    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
419    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
420                                                                                                    callee_locals,
421                                                                                                    index == 0,
422                                                                                                    popframe_extra_args);
423    // This pc doesn't have to be perfect, just good enough to identify the frame
424    // as interpreted so the skeleton frame will be walkable.
425    // The correct pc will be set when the skeleton frame is completely filled out.
426    // The final pc we store in the loop is wrong and will be overwritten below.
427    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
428
429    callee_parameters = array->element(index)->method()->size_of_parameters();
430    callee_locals = array->element(index)->method()->max_locals();
431    popframe_extra_args = 0;
432  }
433
434  // Compute whether the root vframe returns a float or double value.
435  BasicType return_type;
436  {
437    HandleMark hm;
438    methodHandle method(thread, array->element(0)->method());
439    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
440    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
441  }
442
443  // Compute information for handling adapters and adjusting the frame size of the caller.
444  int caller_adjustment = 0;
445
446  // Compute the amount the oldest interpreter frame will have to adjust
447  // its caller's stack by. If the caller is a compiled frame then
448  // we pretend that the callee has no parameters so that the
449  // extension counts for the full amount of locals and not just
450  // locals-parms. This is because without a c2i adapter the parm
451  // area as created by the compiled frame will not be usable by
452  // the interpreter. (Depending on the calling convention there
453  // may not even be enough space).
454
455  // QQQ I'd rather see this pushed down into last_frame_adjust
456  // and have it take the sender (aka caller).
457
458  if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
459    caller_adjustment = last_frame_adjust(0, callee_locals);
460  } else if (callee_locals > callee_parameters) {
461    // The caller frame may need extending to accommodate
462    // non-parameter locals of the first unpacked interpreted frame.
463    // Compute that adjustment.
464    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
465  }
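  // (caller_adjustment is counted in words here; it is scaled to bytes when the
  // UnrollBlock is constructed below.)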
466
467  // If the sender is deoptimized then we must retrieve the address of the handler
468  // since the frame will "magically" show the original pc before the deopt
469  // and we'd undo the deopt.
470
471  frame_pcs[0] = deopt_sender.raw_pc();
472
473#ifndef SHARK
474  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
475#endif // SHARK
476
477  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
478                                      caller_adjustment * BytesPerWord,
479                                      caller_was_method_handle ? 0 : callee_parameters,
480                                      number_of_frames,
481                                      frame_sizes,
482                                      frame_pcs,
483                                      return_type);
484  // On some platforms, we need a way to pass some platform dependent
485  // information to the unpacking code so the skeletal frames come out
486  // correct (initial fp value, unextended sp, ...)
487  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
488
489  if (array->frames() > 1) {
490    if (VerifyStack && TraceDeoptimization) {
491      ttyLocker ttyl;
492      tty->print_cr("Deoptimizing method containing inlining");
493    }
494  }
495
496  array->set_unroll_block(info);
497  return info;
498}
499
500// Called to clean up deoptimization data structures, both in the normal case
501// after unpacking to the stack and when a stack overflow error occurs.
502void Deoptimization::cleanup_deopt_info(JavaThread *thread,
503                                        vframeArray *array) {
504
505  // Get array if coming from exception
506  if (array == NULL) {
507    array = thread->vframe_array_head();
508  }
509  thread->set_vframe_array_head(NULL);
510
511  // Free the previous UnrollBlock
512  vframeArray* old_array = thread->vframe_array_last();
513  thread->set_vframe_array_last(array);
514
515  if (old_array != NULL) {
516    UnrollBlock* old_info = old_array->unroll_block();
517    old_array->set_unroll_block(NULL);
518    delete old_info;
519    delete old_array;
520  }
521
522  // Deallocate any resources created in this routine and any ResourceObjs allocated
523  // inside the vframeArray (StackValueCollections)
524
525  delete thread->deopt_mark();
526  thread->set_deopt_mark(NULL);
527  thread->set_deopt_nmethod(NULL);
528
529
530  if (JvmtiExport::can_pop_frame()) {
531#ifndef CC_INTERP
532    // Regardless of whether we entered this routine with the pending
533    // popframe condition bit set, we should always clear it now
534    thread->clear_popframe_condition();
535#else
536    // C++ interpreter will clear has_pending_popframe when it enters
537    // with method_resume. For deopt_resume2 we clear it now.
538    if (thread->popframe_forcing_deopt_reexecution())
539        thread->clear_popframe_condition();
540#endif /* CC_INTERP */
541  }
542
543  // unpack_frames() is called at the end of the deoptimization handler
544  // and (in C2) at the end of the uncommon trap handler. Note this fact
545  // so that an asynchronous stack walker can work again. This counter is
546  // incremented at the beginning of fetch_unroll_info() and (in C2) at
547  // the beginning of uncommon_trap().
548  thread->dec_in_deopt_handler();
549}
550
551
552// Return BasicType of value being returned
553JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
554
555  // We are already active in the special DeoptResourceMark; any ResourceObjs we
556  // allocate will be freed at the end of the routine.
557
558  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
559  // but makes the entry a little slower. There is, however, a little dance we have to
560  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
561  ResetNoHandleMark rnhm; // No-op in release/product versions
562  HandleMark hm;
563
564  frame stub_frame = thread->last_frame();
565
566  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
567  // must point to the vframeArray for the unpack frame.
568  vframeArray* array = thread->vframe_array_head();
569
570#ifndef PRODUCT
571  if (TraceDeoptimization) {
572    ttyLocker ttyl;
573    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d",
574                  p2i(thread), p2i(array), exec_mode);
575  }
576#endif
577  Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
578              p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode);
579
580  UnrollBlock* info = array->unroll_block();
581
582  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
583  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
584
585  BasicType bt = info->return_type();
586
587  // If we have an exception pending, claim that the return type is an oop
588  // so the deopt_blob does not overwrite the exception_oop.
589
590  if (exec_mode == Unpack_exception)
591    bt = T_OBJECT;
592
593  // Cleanup thread deopt data
594  cleanup_deopt_info(thread, array);
595
596#ifndef PRODUCT
597  if (VerifyStack) {
598    ResourceMark res_mark;
599
600    thread->validate_frame_layout();
601
602    // Verify that the just-unpacked frames match the interpreter's
603    // notions of expression stack and locals
604    vframeArray* cur_array = thread->vframe_array_last();
605    RegisterMap rm(thread, false);
606    rm.set_include_argument_oops(false);
607    bool is_top_frame = true;
608    int callee_size_of_parameters = 0;
609    int callee_max_locals = 0;
610    for (int i = 0; i < cur_array->frames(); i++) {
611      vframeArrayElement* el = cur_array->element(i);
612      frame* iframe = el->iframe();
613      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
614
615      // Get the oop map for this bci
616      InterpreterOopMap mask;
617      int cur_invoke_parameter_size = 0;
618      bool try_next_mask = false;
619      int next_mask_expression_stack_size = -1;
620      int top_frame_expression_stack_adjustment = 0;
621      methodHandle mh(thread, iframe->interpreter_frame_method());
622      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
623      BytecodeStream str(mh);
624      str.set_start(iframe->interpreter_frame_bci());
625      int max_bci = mh->code_size();
626      // Get to the next bytecode if possible
627      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
628      // Check to see if we can grab the number of outgoing arguments
629      // at an uncommon trap for an invoke (where the compiler
630      // generates debug info before the invoke has executed)
631      Bytecodes::Code cur_code = str.next();
632      if (cur_code == Bytecodes::_invokevirtual   ||
633          cur_code == Bytecodes::_invokespecial   ||
634          cur_code == Bytecodes::_invokestatic    ||
635          cur_code == Bytecodes::_invokeinterface ||
636          cur_code == Bytecodes::_invokedynamic) {
637        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
638        Symbol* signature = invoke.signature();
639        ArgumentSizeComputer asc(signature);
640        cur_invoke_parameter_size = asc.size();
641        if (invoke.has_receiver()) {
642          // Add in receiver
643          ++cur_invoke_parameter_size;
644        }
645        if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
646          callee_size_of_parameters++;
647        }
648      }
649      if (str.bci() < max_bci) {
650        Bytecodes::Code bc = str.next();
651        if (bc >= 0) {
652          // The interpreter oop map generator reports results before
653          // the current bytecode has executed except in the case of
654          // calls. It seems to be hard to tell whether the compiler
655          // has emitted debug information matching the "state before"
656          // a given bytecode or the state after, so we try both
657          switch (cur_code) {
658            case Bytecodes::_invokevirtual:
659            case Bytecodes::_invokespecial:
660            case Bytecodes::_invokestatic:
661            case Bytecodes::_invokeinterface:
662            case Bytecodes::_invokedynamic:
663            case Bytecodes::_athrow:
664              break;
665            default: {
666              InterpreterOopMap next_mask;
667              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
668              next_mask_expression_stack_size = next_mask.expression_stack_size();
669              // Need to subtract off the size of the result type of
670              // the bytecode because this is not described in the
671              // debug info but returned to the interpreter in the TOS
672              // caching register
673              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
674              if (bytecode_result_type != T_ILLEGAL) {
675                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
676              }
677              assert(top_frame_expression_stack_adjustment >= 0, "");
678              try_next_mask = true;
679              break;
680            }
681          }
682        }
683      }
684
685      // Verify stack depth and oops in frame
686      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
687      if (!(
688            /* SPARC */
689            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
690            /* x86 */
691            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
692            (try_next_mask &&
693             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
694                                                                    top_frame_expression_stack_adjustment))) ||
695            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
696            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
697             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
698            )) {
699        ttyLocker ttyl;
700
701        // Print out some information that will help us debug the problem
702        tty->print_cr("Wrong number of expression stack elements during deoptimization");
703        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
704        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
705                      iframe->interpreter_frame_expression_stack_size());
706        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
707        tty->print_cr("  try_next_mask = %d", try_next_mask);
708        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
709        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
710        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
711        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
712        tty->print_cr("  exec_mode = %d", exec_mode);
713        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
714        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
715        tty->print_cr("  Interpreted frames:");
716        for (int k = 0; k < cur_array->frames(); k++) {
717          vframeArrayElement* el = cur_array->element(k);
718          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
719        }
720        cur_array->print_on_2(tty);
721        guarantee(false, "wrong number of expression stack elements during deopt");
722      }
723      VerifyOopClosure verify;
724      iframe->oops_interpreted_do(&verify, NULL, &rm, false);
725      callee_size_of_parameters = mh->size_of_parameters();
726      callee_max_locals = mh->max_locals();
727      is_top_frame = false;
728    }
729  }
730#endif /* !PRODUCT */
731
732
733  return bt;
734JRT_END
735
736
737int Deoptimization::deoptimize_dependents() {
738  Threads::deoptimized_wrt_marked_nmethods();
739  return 0;
740}
741
742Deoptimization::DeoptAction Deoptimization::_unloaded_action
743  = Deoptimization::Action_reinterpret;
744
745#if defined(COMPILER2) || INCLUDE_JVMCI
746bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
747  Handle pending_exception(thread->pending_exception());
748  const char* exception_file = thread->exception_file();
749  int exception_line = thread->exception_line();
750  thread->clear_pending_exception();
751
752  bool failures = false;
753
754  for (int i = 0; i < objects->length(); i++) {
755    assert(objects->at(i)->is_object(), "invalid debug information");
756    ObjectValue* sv = (ObjectValue*) objects->at(i);
757
758    KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
759    oop obj = NULL;
760
761    if (k->oop_is_instance()) {
762      InstanceKlass* ik = InstanceKlass::cast(k());
763      obj = ik->allocate_instance(THREAD);
764    } else if (k->oop_is_typeArray()) {
765      TypeArrayKlass* ak = TypeArrayKlass::cast(k());
766      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
767      int len = sv->field_size() / type2size[ak->element_type()];
768      obj = ak->allocate(len, THREAD);
769    } else if (k->oop_is_objArray()) {
770      ObjArrayKlass* ak = ObjArrayKlass::cast(k());
771      obj = ak->allocate(sv->field_size(), THREAD);
772    }
773
774    if (obj == NULL) {
775      failures = true;
776    }
777
778    assert(sv->value().is_null(), "redundant reallocation");
779    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
780    CLEAR_PENDING_EXCEPTION;
781    sv->set_value(obj);
782  }
783
784  if (failures) {
785    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
786  } else if (pending_exception.not_null()) {
787    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
788  }
789
790  return failures;
791}
792
793// restore elements of an eliminated type array
794void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
795  int index = 0;
796  intptr_t val;
797
798  for (int i = 0; i < sv->field_size(); i++) {
799    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
800    switch(type) {
801    case T_LONG: case T_DOUBLE: {
802      assert(value->type() == T_INT, "Agreement.");
803      StackValue* low =
804        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
805#ifdef _LP64
806      jlong res = (jlong)low->get_int();
807#else
808#ifdef SPARC
809      // For SPARC we have to swap high and low words.
810      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
811#else
812      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
813#endif //SPARC
814#endif
815      obj->long_at_put(index, res);
816      break;
817    }
818
819    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
820    case T_INT: case T_FLOAT: { // 4 bytes.
821      assert(value->type() == T_INT, "Agreement.");
822      bool big_value = false;
823      if (i + 1 < sv->field_size() && type == T_INT) {
824        if (sv->field_at(i)->is_location()) {
825          Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
826          if (type == Location::dbl || type == Location::lng) {
827            big_value = true;
828          }
829        } else if (sv->field_at(i)->is_constant_int()) {
830          ScopeValue* next_scope_field = sv->field_at(i + 1);
831          if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
832            big_value = true;
833          }
834        }
835      }
836
837      if (big_value) {
838        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
839  #ifdef _LP64
840        jlong res = (jlong)low->get_int();
841  #else
842  #ifdef SPARC
843        // For SPARC we have to swap high and low words.
844        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
845  #else
846        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
847  #endif //SPARC
848  #endif
849        obj->int_at_put(index, (jint)*((jint*)&res));
850        obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
851      } else {
852        val = value->get_int();
853        obj->int_at_put(index, (jint)*((jint*)&val));
854      }
855      break;
856    }
857
858    case T_SHORT: case T_CHAR: // 2 bytes
859      assert(value->type() == T_INT, "Agreement.");
860      val = value->get_int();
861      obj->short_at_put(index, (jshort)*((jint*)&val));
862      break;
863
864    case T_BOOLEAN: case T_BYTE: // 1 byte
865      assert(value->type() == T_INT, "Agreement.");
866      val = value->get_int();
867      obj->bool_at_put(index, (jboolean)*((jint*)&val));
868      break;
869
870      default:
871        ShouldNotReachHere();
872    }
873    index++;
874  }
875}
876
877
878// restore fields of an eliminated object array
879void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
880  for (int i = 0; i < sv->field_size(); i++) {
881    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
882    assert(value->type() == T_OBJECT, "object element expected");
883    obj->obj_at_put(i, value->get_obj()());
884  }
885}
886
887class ReassignedField {
888public:
889  int _offset;
890  BasicType _type;
891public:
892  ReassignedField() {
893    _offset = 0;
894    _type = T_ILLEGAL;
895  }
896};
897
898int compare(ReassignedField* left, ReassignedField* right) {
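// Comparator used to sort the collected fields by ascending offset (see fields->sort(compare) below).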
899  return left->_offset - right->_offset;
900}
901
902// Restore fields of an eliminated instance object using the same field order
903// returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
904static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
905  if (klass->superklass() != NULL) {
906    svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
907  }
908
909  GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
910  for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
911    if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
912      ReassignedField field;
913      field._offset = fs.offset();
914      field._type = FieldType::basic_type(fs.signature());
915      fields->append(field);
916    }
917  }
918  fields->sort(compare);
919  for (int i = 0; i < fields->length(); i++) {
920    intptr_t val;
921    ScopeValue* scope_field = sv->field_at(svIndex);
922    StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
923    int offset = fields->at(i)._offset;
924    BasicType type = fields->at(i)._type;
925    switch (type) {
926      case T_OBJECT: case T_ARRAY:
927        assert(value->type() == T_OBJECT, "Agreement.");
928        obj->obj_field_put(offset, value->get_obj()());
929        break;
930
931      // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
932      case T_INT: case T_FLOAT: { // 4 bytes.
933        assert(value->type() == T_INT, "Agreement.");
934        bool big_value = false;
935        if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
936          if (scope_field->is_location()) {
937            Location::Type type = ((LocationValue*) scope_field)->location().type();
938            if (type == Location::dbl || type == Location::lng) {
939              big_value = true;
940            }
941          }
942          if (scope_field->is_constant_int()) {
943            ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
944            if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
945              big_value = true;
946            }
947          }
948        }
949
950        if (big_value) {
951          i++;
952          assert(i < fields->length(), "second T_INT field needed");
953          assert(fields->at(i)._type == T_INT, "T_INT field needed");
954        } else {
955          val = value->get_int();
956          obj->int_field_put(offset, (jint)*((jint*)&val));
957          break;
958        }
959      }
960        /* no break */
961
962      case T_LONG: case T_DOUBLE: {
963        assert(value->type() == T_INT, "Agreement.");
964        StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
965#ifdef _LP64
966        jlong res = (jlong)low->get_int();
967#else
968#ifdef SPARC
969        // For SPARC we have to swap high and low words.
970        jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
971#else
972        jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
973#endif //SPARC
974#endif
975        obj->long_field_put(offset, res);
976        break;
977      }
978
979      case T_SHORT: case T_CHAR: // 2 bytes
980        assert(value->type() == T_INT, "Agreement.");
981        val = value->get_int();
982        obj->short_field_put(offset, (jshort)*((jint*)&val));
983        break;
984
985      case T_BOOLEAN: case T_BYTE: // 1 byte
986        assert(value->type() == T_INT, "Agreement.");
987        val = value->get_int();
988        obj->bool_field_put(offset, (jboolean)*((jint*)&val));
989        break;
990
991      default:
992        ShouldNotReachHere();
993    }
994    svIndex++;
995  }
996  return svIndex;
997}
998
999// restore fields of all eliminated objects and arrays
1000void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1001  for (int i = 0; i < objects->length(); i++) {
1002    ObjectValue* sv = (ObjectValue*) objects->at(i);
1003    KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
1004    Handle obj = sv->value();
1005    assert(obj.not_null() || realloc_failures, "reallocation was missed");
1006    if (PrintDeoptimizationDetails) {
1007      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1008    }
1009    if (obj.is_null()) {
1010      continue;
1011    }
1012
1013    if (k->oop_is_instance()) {
1014      InstanceKlass* ik = InstanceKlass::cast(k());
1015      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1016    } else if (k->oop_is_typeArray()) {
1017      TypeArrayKlass* ak = TypeArrayKlass::cast(k());
1018      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1019    } else if (k->oop_is_objArray()) {
1020      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1021    }
1022  }
1023}
1024
1025
1026// relock objects for which synchronization was eliminated
1027void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
1028  for (int i = 0; i < monitors->length(); i++) {
1029    MonitorInfo* mon_info = monitors->at(i);
1030    if (mon_info->eliminated()) {
1031      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1032      if (!mon_info->owner_is_scalar_replaced()) {
1033        Handle obj = Handle(mon_info->owner());
1034        markOop mark = obj->mark();
1035        if (UseBiasedLocking && mark->has_bias_pattern()) {
1036          // Newly allocated objects may have the mark set to anonymously biased.
1037          // Also the deoptimized method may have called methods with synchronization
1038          // where the thread-local object is bias locked to the current thread.
1039          assert(mark->is_biased_anonymously() ||
1040                 mark->biased_locker() == thread, "should be locked to current thread");
1041          // Reset mark word to unbiased prototype.
1042          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
1043          obj->set_mark(unbiased_prototype);
1044        }
1045        BasicLock* lock = mon_info->lock();
1046        ObjectSynchronizer::slow_enter(obj, lock, thread);
1047        assert(mon_info->owner()->is_locked(), "object must be locked now");
1048      }
1049    }
1050  }
1051}
1052
1053
1054#ifndef PRODUCT
1055// print information about reallocated objects
1056void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
1057  fieldDescriptor fd;
1058
1059  for (int i = 0; i < objects->length(); i++) {
1060    ObjectValue* sv = (ObjectValue*) objects->at(i);
1061    KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
1062    Handle obj = sv->value();
1063
1064    tty->print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
1065    k->print_value();
1066    assert(obj.not_null() || realloc_failures, "reallocation was missed");
1067    if (obj.is_null()) {
1068      tty->print(" allocation failed");
1069    } else {
1070      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
1071    }
1072    tty->cr();
1073
1074    if (Verbose && !obj.is_null()) {
1075      k->oop_print_on(obj(), tty);
1076    }
1077  }
1078}
1079#endif
1080#endif // COMPILER2 || INCLUDE_JVMCI
1081
1082vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1083  Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1084
1085#ifndef PRODUCT
1086  if (PrintDeoptimizationDetails) {
1087    ttyLocker ttyl;
1088    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread));
1089    fr.print_on(tty);
1090    tty->print_cr("     Virtual frames (innermost first):");
1091    for (int index = 0; index < chunk->length(); index++) {
1092      compiledVFrame* vf = chunk->at(index);
1093      tty->print("       %2d - ", index);
1094      vf->print_value();
1095      int bci = chunk->at(index)->raw_bci();
1096      const char* code_name;
1097      if (bci == SynchronizationEntryBCI) {
1098        code_name = "sync entry";
1099      } else {
1100        Bytecodes::Code code = vf->method()->code_at(bci);
1101        code_name = Bytecodes::name(code);
1102      }
1103      tty->print(" - %s", code_name);
1104      tty->print_cr(" @ bci %d ", bci);
1105      if (Verbose) {
1106        vf->print();
1107        tty->cr();
1108      }
1109    }
1110  }
1111#endif
1112
1113  // Register map for next frame (used for stack crawl).  We capture
1114  // the state of the deopt'ing frame's caller.  Thus if we need to
1115  // stuff a C2I adapter we can properly fill in the callee-save
1116  // register locations.
1117  frame caller = fr.sender(reg_map);
1118  int frame_size = caller.sp() - fr.sp();
1119
1120  frame sender = caller;
1121
1122  // Since the Java thread being deoptimized will eventually adjust its own stack,
1123  // the vframeArray containing the unpacking information is allocated in the C heap.
1124  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1125  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
1126
1127  // Compare the vframeArray to the collected vframes
1128  assert(array->structural_compare(thread, chunk), "just checking");
1129
1130#ifndef PRODUCT
1131  if (PrintDeoptimizationDetails) {
1132    ttyLocker ttyl;
1133    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, p2i(array));
1134  }
1135#endif // PRODUCT
1136
1137  return array;
1138}
1139
1140#if defined(COMPILER2) || INCLUDE_JVMCI
1141void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1142  // Reallocation of some scalar replaced objects failed. Record
1143  // that we need to pop all the interpreter frames for the
1144  // deoptimized compiled frame.
1145  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1146  thread->set_frames_to_pop_failed_realloc(array->frames());
1147  // Unlock all monitors here otherwise the interpreter will see a
1148  // mix of locked and unlocked monitors (because of failed
1149  // reallocations of synchronized objects) and be confused.
1150  for (int i = 0; i < array->frames(); i++) {
1151    MonitorChunk* monitors = array->element(i)->monitors();
1152    if (monitors != NULL) {
1153      for (int j = 0; j < monitors->number_of_monitors(); j++) {
1154        BasicObjectLock* src = monitors->at(j);
1155        if (src->obj() != NULL) {
1156          ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
1157        }
1158      }
1159      array->element(i)->free_monitors(thread);
1160#ifdef ASSERT
1161      array->element(i)->set_removed_monitors();
1162#endif
1163    }
1164  }
1165}
1166#endif
1167
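// Collect the owners of all non-eliminated monitors in the given scope so that their biases can be revoked.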
1168static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
1169  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
1170  for (int i = 0; i < monitors->length(); i++) {
1171    MonitorInfo* mon_info = monitors->at(i);
1172    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
1173      objects_to_revoke->append(Handle(mon_info->owner()));
1174    }
1175  }
1176}
1177
1178
1179void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
1180  if (!UseBiasedLocking) {
1181    return;
1182  }
1183
1184  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1185
1186  // Unfortunately we don't have a RegisterMap available in most of
1187  // the places we want to call this routine so we need to walk the
1188  // stack again to update the register map.
1189  if (map == NULL || !map->update_map()) {
1190    StackFrameStream sfs(thread, true);
1191    bool found = false;
1192    while (!found && !sfs.is_done()) {
1193      frame* cur = sfs.current();
1194      sfs.next();
1195      found = cur->id() == fr.id();
1196    }
1197    assert(found, "frame to be deoptimized not found on target thread's stack");
1198    map = sfs.register_map();
1199  }
1200
1201  vframe* vf = vframe::new_vframe(&fr, map, thread);
1202  compiledVFrame* cvf = compiledVFrame::cast(vf);
1203  // Revoke monitors' biases in all scopes
1204  while (!cvf->is_top()) {
1205    collect_monitors(cvf, objects_to_revoke);
1206    cvf = compiledVFrame::cast(cvf->sender());
1207  }
1208  collect_monitors(cvf, objects_to_revoke);
1209
1210  if (SafepointSynchronize::is_at_safepoint()) {
1211    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1212  } else {
1213    BiasedLocking::revoke(objects_to_revoke);
1214  }
1215}
1216
1217
1218void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
1219  if (!UseBiasedLocking) {
1220    return;
1221  }
1222
1223  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
1224  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
1225  for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
1226    if (jt->has_last_Java_frame()) {
1227      StackFrameStream sfs(jt, true);
1228      while (!sfs.is_done()) {
1229        frame* cur = sfs.current();
1230        if (cb->contains(cur->pc())) {
1231          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
1232          compiledVFrame* cvf = compiledVFrame::cast(vf);
1233          // Revoke monitors' biases in all scopes
1234          while (!cvf->is_top()) {
1235            collect_monitors(cvf, objects_to_revoke);
1236            cvf = compiledVFrame::cast(cvf->sender());
1237          }
1238          collect_monitors(cvf, objects_to_revoke);
1239        }
1240        sfs.next();
1241      }
1242    }
1243  }
1244  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
1245}
1246
1247
1248void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1249  assert(fr.can_be_deoptimized(), "checking frame type");
1250
1251  gather_statistics(reason, Action_none, Bytecodes::_illegal);
1252
1253  if (LogCompilation && xtty != NULL) {
1254    nmethod* nm = fr.cb()->as_nmethod_or_null();
1255    assert(nm != NULL, "only compiled methods can deopt");
1256
1257    ttyLocker ttyl;
1258    xtty->begin_head("deoptimized thread='" UINTX_FORMAT "'", (uintx)thread->osthread()->thread_id());
1259    nm->log_identity(xtty);
1260    xtty->end_head();
1261    for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1262      xtty->begin_elem("jvms bci='%d'", sd->bci());
1263      xtty->method(sd->method());
1264      xtty->end_elem();
1265      if (sd->is_top())  break;
1266    }
1267    xtty->tail("deoptimized");
1268  }
1269
1270  // Patch the compiled method so that when execution returns to it we will
1271  // deopt the execution state and return to the interpreter.
1272  fr.deoptimize(thread);
1273}
1274
1275void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
1276  deoptimize(thread, fr, map, Reason_constraint);
1277}
1278
1279void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
1280  // Deoptimize only if the frame comes from compiled code.
1281  // Do not deoptimize the frame which is already patched
1282  // during the execution of the loops below.
1283  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1284    return;
1285  }
1286  ResourceMark rm;
1287  DeoptimizationMarker dm;
1288  if (UseBiasedLocking) {
1289    revoke_biases_of_monitors(thread, fr, map);
1290  }
1291  deoptimize_single_frame(thread, fr, reason);
1292
1293}
1294
1295
1296void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1297  assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
1298         "can only deoptimize other thread at a safepoint");
1299  // Compute frame and register map based on thread and sp.
1300  RegisterMap reg_map(thread, UseBiasedLocking);
1301  frame fr = thread->last_frame();
1302  while (fr.id() != id) {
1303    fr = fr.sender(&reg_map);
1304  }
1305  deoptimize(thread, fr, &reg_map, reason);
1306}
1307
1308
1309void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1310  if (thread == Thread::current()) {
1311    Deoptimization::deoptimize_frame_internal(thread, id, reason);
1312  } else {
1313    VM_DeoptimizeFrame deopt(thread, id, reason);
1314    VMThread::execute(&deopt);
1315  }
1316}
1317
1318void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1319  deoptimize_frame(thread, id, Reason_constraint);
1320}
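// Illustrative usage sketch (not a call site in this file; the frame 'f' and
// the surrounding code are hypothetical): a caller that wants to push the
// topmost compiled frame of the current thread back to the interpreter could
// do roughly
//
//   JavaThread* jt = JavaThread::current();
//   frame f = jt->last_frame();
//   if (f.can_be_deoptimized()) {
//     Deoptimization::deoptimize_frame(jt, f.id());
//   }
//
// The id is matched against fr.id() while walking sender frames in
// deoptimize_frame_internal() above.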
1321
1322// JVMTI PopFrame support
1323JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
1324{
1325  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
1326}
1327JRT_END
1328
1329MethodData*
1330Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
1331                                bool create_if_missing) {
1332  Thread* THREAD = thread;
1333  MethodData* mdo = m()->method_data();
1334  if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1335    // Build an MDO.  Ignore errors like OutOfMemory;
1336    // that simply means we won't have an MDO to update.
1337    Method::build_interpreter_method_data(m, THREAD);
1338    if (HAS_PENDING_EXCEPTION) {
1339      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1340      CLEAR_PENDING_EXCEPTION;
1341    }
1342    mdo = m()->method_data();
1343  }
1344  return mdo;
1345}
1346
1347#if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI
1348void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
1349  // in case of an unresolved klass entry, load the class.
1350  if (constant_pool->tag_at(index).is_unresolved_klass()) {
1351    Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
1352    return;
1353  }
1354
1355  if (!constant_pool->tag_at(index).is_symbol()) return;
1356
1357  Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
1358  Symbol*  symbol  = constant_pool->symbol_at(index);
1359
1360  // class name?
1361  if (symbol->byte_at(0) != '(') {
1362    Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1363    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
1364    return;
1365  }
1366
1367  // then it must be a signature!
1368  ResourceMark rm(THREAD);
1369  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
1370    if (ss.is_object()) {
1371      Symbol* class_name = ss.as_symbol(CHECK);
1372      Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
1373      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
1374    }
1375  }
1376}
1377
1378
1379void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
1380  EXCEPTION_MARK;
1381  load_class_by_index(constant_pool, index, THREAD);
1382  if (HAS_PENDING_EXCEPTION) {
1383    // An exception happened during class loading. We ignore it here because it
1384    // will be rethrown when the current activation is deoptimized and
1385    // the interpreter re-executes the bytecode.
1386    CLEAR_PENDING_EXCEPTION;
1387    // Class loading called Java code which may have caused a stack
1388    // overflow. If the exception was thrown right before the return
1389    // to the runtime, the stack is no longer guarded. Reguard the
1390    // stack; otherwise, if we return to the uncommon trap blob and the
1391    // stack bang causes a stack overflow, we crash.
1392    assert(THREAD->is_Java_thread(), "only a java thread can be here");
1393    JavaThread* thread = (JavaThread*)THREAD;
1394    bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
1395    if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
1396    assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1397  }
1398}
1399
1400JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
1401  HandleMark hm;
1402
1403  // uncommon_trap() is called at the beginning of the uncommon trap
1404  // handler. Note this fact before we start generating temporary frames
1405  // that can confuse an asynchronous stack walker. This counter is
1406  // decremented at the end of unpack_frames().
1407  thread->inc_in_deopt_handler();
1408
1409  // We need to update the map if we have biased locking.
1410#if INCLUDE_JVMCI
1411  // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1412  RegisterMap reg_map(thread, true);
1413#else
1414  RegisterMap reg_map(thread, UseBiasedLocking);
1415#endif
1416  frame stub_frame = thread->last_frame();
1417  frame fr = stub_frame.sender(&reg_map);
1418  // Make sure the calling nmethod is not getting deoptimized and removed
1419  // before we are done with it.
1420  nmethodLocker nl(fr.pc());
1421
1422  // Log a message
1423  Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1424              trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1425
1426  {
1427    ResourceMark rm;
1428
1429    // Revoke biases of any monitors in the frame to ensure we can migrate them
1430    revoke_biases_of_monitors(thread, fr, &reg_map);
1431
1432    DeoptReason reason = trap_request_reason(trap_request);
1433    DeoptAction action = trap_request_action(trap_request);
1434#if INCLUDE_JVMCI
1435    int debug_id = trap_request_debug_id(trap_request);
1436#endif
1437    jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
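    // Note (summary, no new behavior): the compiled code packs the reason,
    // the action, an optional constant pool index and, for JVMCI, a debug id
    // into the single jint trap_request; the trap_request_*() helpers above
    // decode those fields.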
1438
1439    vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
1440    compiledVFrame* cvf = compiledVFrame::cast(vf);
1441
1442    nmethod* nm = cvf->code();
1443
1444    ScopeDesc*      trap_scope  = cvf->scope();
1445
1446    if (TraceDeoptimization) {
1447      ttyLocker ttyl;
1448      tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1449#if INCLUDE_JVMCI
1450          , debug_id
1451#endif
1452          );
1453    }
1454
1455    methodHandle    trap_method = trap_scope->method();
1456    int             trap_bci    = trap_scope->bci();
1457#if INCLUDE_JVMCI
1458    oop speculation = thread->pending_failed_speculation();
1459    if (nm->is_compiled_by_jvmci()) {
1460      if (speculation != NULL) {
1461        oop speculation_log = nm->speculation_log();
1462        if (speculation_log != NULL) {
1463          if (TraceDeoptimization || TraceUncollectedSpeculations) {
1464            if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) {
1465              tty->print_cr("A speculation that was not collected by the compiler is being overwritten");
1466            }
1467          }
1468          if (TraceDeoptimization) {
1469            tty->print_cr("Saving speculation to speculation log");
1470          }
1471          HotSpotSpeculationLog::set_lastFailed(speculation_log, speculation);
1472        } else {
1473          if (TraceDeoptimization) {
1474            tty->print_cr("Speculation present but no speculation log");
1475          }
1476        }
1477        thread->set_pending_failed_speculation(NULL);
1478      } else {
1479        if (TraceDeoptimization) {
1480          tty->print_cr("No speculation");
1481        }
1482      }
1483    } else {
1484      assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers");
1485    }
1486
1487    if (trap_bci == SynchronizationEntryBCI) {
1488      trap_bci = 0;
1489      thread->set_pending_monitorenter(true);
1490    }
1491
1492    if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1493      thread->set_pending_transfer_to_interpreter(true);
1494    }
1495#endif
1496
1497    Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1498
1499    if (trap_scope->rethrow_exception()) {
1500      if (PrintDeoptimizationDetails) {
1501        tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_method->method_holder()->name()->as_C_string(), trap_method->name()->as_C_string(), trap_bci);
1502      }
1503      GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
1504      guarantee(expressions != NULL, "must have exception to throw");
1505      ScopeValue* topOfStack = expressions->top();
1506      Handle topOfStackObj = StackValue::create_stack_value(&fr, &reg_map, topOfStack)->get_obj();
1507      THREAD->set_pending_exception(topOfStackObj(), NULL, 0);
1508    }
1509
1510    // Record this event in the histogram.
1511    gather_statistics(reason, action, trap_bc);
1512
1513    // Ensure that we can record deopt. history:
1514    // Need MDO to record RTM code generation state.
1515    bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking );
1516
1517    methodHandle profiled_method;
1518#if INCLUDE_JVMCI
1519    if (nm->is_compiled_by_jvmci()) {
1520      profiled_method = nm->method();
1521    } else {
1522      profiled_method = trap_method;
1523    }
1524#else
1525    profiled_method = trap_method;
1526#endif
1527
1528    MethodData* trap_mdo =
1529      get_method_data(thread, profiled_method, create_if_missing);
1530
1531    // Log a message
1532    Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d",
1533                              trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()),
1534                              trap_method->name_and_sig_as_C_string(), trap_bci);
1535
1536    // Print a bunch of diagnostics, if requested.
1537    if (TraceDeoptimization || LogCompilation) {
1538      ResourceMark rm;
1539      ttyLocker ttyl;
1540      char buf[100];
1541      if (xtty != NULL) {
1542        xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
1543                         os::current_thread_id(),
1544                         format_trap_request(buf, sizeof(buf), trap_request));
1545        nm->log_identity(xtty);
1546      }
1547      Symbol* class_name = NULL;
1548      bool unresolved = false;
1549      if (unloaded_class_index >= 0) {
1550        constantPoolHandle constants (THREAD, trap_method->constants());
1551        if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1552          class_name = constants->klass_name_at(unloaded_class_index);
1553          unresolved = true;
1554          if (xtty != NULL)
1555            xtty->print(" unresolved='1'");
1556        } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1557          class_name = constants->symbol_at(unloaded_class_index);
1558        }
1559        if (xtty != NULL)
1560          xtty->name(class_name);
1561      }
1562      if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
1563        // Dump the relevant MDO state.
1564        // This is the deopt count for the current reason, any previous
1565        // reasons or recompiles seen at this point.
1566        int dcnt = trap_mdo->trap_count(reason);
1567        if (dcnt != 0)
1568          xtty->print(" count='%d'", dcnt);
1569        ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1570        int dos = (pdata == NULL)? 0: pdata->trap_state();
1571        if (dos != 0) {
1572          xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1573          if (trap_state_is_recompiled(dos)) {
1574            int recnt2 = trap_mdo->overflow_recompile_count();
1575            if (recnt2 != 0)
1576              xtty->print(" recompiles2='%d'", recnt2);
1577          }
1578        }
1579      }
1580      if (xtty != NULL) {
1581        xtty->stamp();
1582        xtty->end_head();
1583      }
1584      if (TraceDeoptimization) {  // make noise on the tty
1585        tty->print("Uncommon trap occurred in");
1586        nm->method()->print_short_name(tty);
1587        tty->print(" compiler=%s compile_id=%d", nm->compiler() == NULL ? "" : nm->compiler()->name(), nm->compile_id());
1588#if INCLUDE_JVMCI
1589        oop installedCode = nm->jvmci_installed_code();
1590        if (installedCode != NULL) {
1591          oop installedCodeName = NULL;
1592          if (installedCode->is_a(InstalledCode::klass())) {
1593            installedCodeName = InstalledCode::name(installedCode);
1594          }
1595          if (installedCodeName != NULL) {
1596            tty->print(" (JVMCI: installedCodeName=%s) ", java_lang_String::as_utf8_string(installedCodeName));
1597          } else {
1598            tty->print(" (JVMCI: installed code has no name) ");
1599          }
1600        } else if (nm->is_compiled_by_jvmci()) {
1601          tty->print(" (JVMCI: no installed code) ");
1602        }
1603#endif
1604        tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
1605                   p2i(fr.pc()),
1606                   os::current_thread_id(),
1607                   trap_reason_name(reason),
1608                   trap_action_name(action),
1609                   unloaded_class_index
1610#if INCLUDE_JVMCI
1611                   , debug_id
1612#endif
1613                   );
1614        if (class_name != NULL) {
1615          tty->print(unresolved ? " unresolved class: " : " symbol: ");
1616          class_name->print_symbol_on(tty);
1617        }
1618        tty->cr();
1619      }
1620      if (xtty != NULL) {
1621        // Log the precise location of the trap.
1622        for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1623          xtty->begin_elem("jvms bci='%d'", sd->bci());
1624          xtty->method(sd->method());
1625          xtty->end_elem();
1626          if (sd->is_top())  break;
1627        }
1628        xtty->tail("uncommon_trap");
1629      }
1630    }
1631    // (End diagnostic printout.)
1632
1633    // Load class if necessary
1634    if (unloaded_class_index >= 0) {
1635      constantPoolHandle constants(THREAD, trap_method->constants());
1636      load_class_by_index(constants, unloaded_class_index);
1637    }
1638
1639    // Flush the nmethod if necessary and desirable.
1640    //
1641    // We need to avoid situations where we are re-flushing the nmethod
1642    // because of a hot deoptimization site.  Repeated flushes at the same
1643    // point need to be detected by the compiler and avoided.  If the compiler
1644    // cannot avoid them (or has a bug and "refuses" to avoid them), this
1645    // module must take measures to avoid an infinite cycle of recompilation
1646    // and deoptimization.  There are several such measures:
1647    //
1648    //   1. If a recompilation is ordered a second time at some site X
1649    //   and for the same reason R, the action is adjusted to 'reinterpret',
1650    //   to give the interpreter time to exercise the method more thoroughly.
1651    //   If this happens, the method's overflow_recompile_count is incremented.
1652    //
1653    //   2. If the compiler fails to reduce the deoptimization rate, then
1654    //   the method's overflow_recompile_count will begin to exceed the set
1655    //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
1656    //   is adjusted to 'make_not_compilable', and the method is abandoned
1657    //   to the interpreter.  This is a performance hit for hot methods,
1658    //   but is better than a disastrous infinite cycle of recompilations.
1659    //   (Actually, only the method containing the site X is abandoned.)
1660    //
1661    //   3. In parallel with the previous measures, if the total number of
1662    //   recompilations of a method exceeds the much larger set limit
1663    //   PerMethodRecompilationCutoff, the method is abandoned.
1664    //   This should only happen if the method is very large and has
1665    //   many "lukewarm" deoptimizations.  The code which enforces this
1666    //   limit is elsewhere (class nmethod, class Method).
1667    //
1668    // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1669    // to recompile at each bytecode independently of the per-BCI cutoff.
1670    //
1671    // The decision to update code is up to the compiler, and is encoded
1672    // in the Action_xxx code.  If the compiler requests Action_none
1673    // no trap state is changed, no compiled code is changed, and the
1674    // computation suffers along in the interpreter.
1675    //
1676    // The other action codes specify various tactics for decompilation
1677    // and recompilation.  Action_maybe_recompile is the loosest, and
1678    // allows the compiled code to stay around until enough traps are seen,
1679    // and until the compiler gets around to recompiling the trapping method.
1680    //
1681    // The other actions cause immediate removal of the present code.
1682
1683    // Traps caused by injected profile shouldn't pollute trap counts.
1684    bool injected_profile_trap = trap_method->has_injected_profile() &&
1685                                 (reason == Reason_intrinsic || reason == Reason_unreached);
1686
1687    bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
1688    bool make_not_entrant = false;
1689    bool make_not_compilable = false;
1690    bool reprofile = false;
1691    switch (action) {
1692    case Action_none:
1693      // Keep the old code.
1694      update_trap_state = false;
1695      break;
1696    case Action_maybe_recompile:
1697      // We do not need to invalidate the present code, but we can
1698      // initiate another compilation.
1699      // Start compiler without (necessarily) invalidating the nmethod.
1700      // The system will tolerate the old code, but new code should be
1701      // generated when possible.
1702      break;
1703    case Action_reinterpret:
1704      // Go back into the interpreter for a while, and then consider
1705      // recompiling from scratch.
1706      make_not_entrant = true;
1707      // Reset the invocation counter for the outermost method.
1708      // This will allow the interpreter to exercise the bytecodes
1709      // for a while before recompiling.
1710      // By contrast, Action_make_not_entrant is immediate.
1711      //
1712      // Note that the compiler will track null_check, null_assert,
1713      // range_check, and class_check events and log them as if they
1714      // had been traps taken from compiled code.  This will update
1715      // the MDO trap history so that the next compilation will
1716      // properly detect hot trap sites.
1717      reprofile = true;
1718      break;
1719    case Action_make_not_entrant:
1720      // Request immediate recompilation, and get rid of the old code.
1721      // Make them not entrant, so the next time they are called they get
1722      // recompiled.  Unloaded classes are loaded now, so we can recompile before
1723      // the next call.  The same holds for uninitialized classes.  The interpreter
1724      // will link the missing class, if any.
1725      make_not_entrant = true;
1726      break;
1727    case Action_make_not_compilable:
1728      // Give up on compiling this method at all.
1729      make_not_entrant = true;
1730      make_not_compilable = true;
1731      break;
1732    default:
1733      ShouldNotReachHere();
1734    }
1735
1736    // Setting +ProfileTraps fixes the following, on all platforms:
1737    // 4852688: ProfileInterpreter is off by default for ia64.  The result is
1738    // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1739    // recompile relies on a MethodData* to record heroic opt failures.
1740
1741    // Whether the interpreter is producing MDO data or not, we also need
1742    // to use the MDO to detect hot deoptimization points and control
1743    // aggressive optimization.
1744    bool inc_recompile_count = false;
1745    ProfileData* pdata = NULL;
1746    if (ProfileTraps && update_trap_state && trap_mdo != NULL) {
1747      assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
1748      uint this_trap_count = 0;
1749      bool maybe_prior_trap = false;
1750      bool maybe_prior_recompile = false;
1751      pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
1752#if INCLUDE_JVMCI
1753                                   nm->is_compiled_by_jvmci() && nm->is_osr_method(),
1754#endif
1755                                   nm->method(),
1756                                   //outputs:
1757                                   this_trap_count,
1758                                   maybe_prior_trap,
1759                                   maybe_prior_recompile);
1760      // Because the interpreter also counts null, div0, range, and class
1761      // checks, these traps from compiled code are double-counted.
1762      // This is harmless; it just means that the PerXTrapLimit values
1763      // are in effect a little smaller than they look.
1764
1765      DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1766      if (per_bc_reason != Reason_none) {
1767        // Now take action based on the partially known per-BCI history.
1768        if (maybe_prior_trap
1769            && this_trap_count >= (uint)PerBytecodeTrapLimit) {
1770          // If there are too many traps at this BCI, force a recompile.
1771          // This will allow the compiler to see the limit overflow, and
1772          // take corrective action, if possible.  The compiler generally
1773          // does not use the exact PerBytecodeTrapLimit value, but instead
1774          // changes its tactics if it sees any traps at all.  This provides
1775          // a little hysteresis, delaying a recompile until a trap happens
1776          // several times.
1777          //
1778          // Actually, since there is only one bit of counter per BCI,
1779          // the possible per-BCI counts are {0,1,(per-method count)}.
1780          // This produces accurate results if in fact there is only
1781          // one hot trap site, but begins to get fuzzy if there are
1782          // many sites.  For example, if there are ten sites each
1783          // trapping two or more times, they each get the blame for
1784          // all of their traps.
1785          make_not_entrant = true;
1786        }
1787
1788        // Detect repeated recompilation at the same BCI, and enforce a limit.
1789        if (make_not_entrant && maybe_prior_recompile) {
1790          // More than one recompile at this point.
1791          inc_recompile_count = maybe_prior_trap;
1792        }
1793      } else {
1794        // For reasons which are not recorded per-bytecode, we simply
1795        // force recompiles unconditionally.
1796        // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
1797        make_not_entrant = true;
1798      }
1799
1800      // Go back to the compiler if there are too many traps in this method.
1801      if (this_trap_count >= per_method_trap_limit(reason)) {
1802        // If there are too many traps in this method, force a recompile.
1803        // This will allow the compiler to see the limit overflow, and
1804        // take corrective action, if possible.
1805        // (This condition is an unlikely backstop only, because the
1806        // PerBytecodeTrapLimit is more likely to take effect first,
1807        // if it is applicable.)
1808        make_not_entrant = true;
1809      }
1810
1811      // Here's more hysteresis:  If there has been a recompile at
1812      // this trap point already, run the method in the interpreter
1813      // for a while to exercise it more thoroughly.
1814      if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
1815        reprofile = true;
1816      }
1817    }
1818
1819    // Take requested actions on the method:
1820
1821    // Recompile
1822    if (make_not_entrant) {
1823      if (!nm->make_not_entrant()) {
1824        return; // the call did not change nmethod's state
1825      }
1826
1827      if (pdata != NULL) {
1828        // Record the recompilation event, if any.
1829        int tstate0 = pdata->trap_state();
1830        int tstate1 = trap_state_set_recompiled(tstate0, true);
1831        if (tstate1 != tstate0)
1832          pdata->set_trap_state(tstate1);
1833      }
1834
1835#if INCLUDE_RTM_OPT
1836      // Restart collecting RTM locking abort statistic if the method
1837      // is recompiled for a reason other than RTM state change.
1838      // Assume that in new recompiled code the statistic could be different,
1839      // for example, due to different inlining.
1840      if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
1841          UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) {
1842        trap_mdo->atomic_set_rtm_state(ProfileRTM);
1843      }
1844#endif
1845      // For code aging we count traps separately here, using make_not_entrant()
1846      // as a guard against simultaneous deopts in multiple threads.
1847      if (reason == Reason_tenured && trap_mdo != NULL) {
1848        trap_mdo->inc_tenure_traps();
1849      }
1850    }
1851
1852    if (inc_recompile_count) {
1853      trap_mdo->inc_overflow_recompile_count();
1854      if ((uint)trap_mdo->overflow_recompile_count() >
1855          (uint)PerBytecodeRecompilationCutoff) {
1856        // Give up on the method containing the bad BCI.
1857        if (trap_method() == nm->method()) {
1858          make_not_compilable = true;
1859        } else {
1860          trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff");
1861          // But give grace to the enclosing nm->method().
1862        }
1863      }
1864    }
1865
1866    // Reprofile
1867    if (reprofile) {
1868      CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
1869    }
1870
1871    // Give up compiling
1872    if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
1873      assert(make_not_entrant, "consistent");
1874      nm->method()->set_not_compilable(CompLevel_full_optimization);
1875    }
1876
1877  } // Free marked resources
1878
1879}
1880JRT_END
1881
1882ProfileData*
1883Deoptimization::query_update_method_data(MethodData* trap_mdo,
1884                                         int trap_bci,
1885                                         Deoptimization::DeoptReason reason,
1886                                         bool update_total_trap_count,
1887#if INCLUDE_JVMCI
1888                                         bool is_osr,
1889#endif
1890                                         Method* compiled_method,
1891                                         //outputs:
1892                                         uint& ret_this_trap_count,
1893                                         bool& ret_maybe_prior_trap,
1894                                         bool& ret_maybe_prior_recompile) {
1895  bool maybe_prior_trap = false;
1896  bool maybe_prior_recompile = false;
1897  uint this_trap_count = 0;
1898  if (update_total_trap_count) {
1899    uint idx = reason;
1900#if INCLUDE_JVMCI
1901    if (is_osr) {
1902      idx += Reason_LIMIT;
1903    }
1904#endif
1905    uint prior_trap_count = trap_mdo->trap_count(idx);
1906    this_trap_count  = trap_mdo->inc_trap_count(idx);
1907
1908    // If the runtime cannot find a place to store trap history,
1909    // it is estimated based on the general condition of the method.
1910    // If the method has ever been recompiled, or has ever incurred
1911    // a trap with the present reason, then this BCI is assumed
1912    // (pessimistically) to be the culprit.
1913    maybe_prior_trap      = (prior_trap_count != 0);
1914    maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
1915  }
1916  ProfileData* pdata = NULL;
1917
1918
1919  // For reasons which are recorded per bytecode, we check per-BCI data.
1920  DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1921  assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
1922  if (per_bc_reason != Reason_none) {
1923    // Find the profile data for this BCI.  If there isn't one,
1924    // try to allocate one from the MDO's set of spares.
1925    // This will let us detect a repeated trap at this point.
1926    pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
1927
1928    if (pdata != NULL) {
1929      if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
1930        if (LogCompilation && xtty != NULL) {
1931          ttyLocker ttyl;
1932          // no more room for speculative traps in this MDO
1933          xtty->elem("speculative_traps_oom");
1934        }
1935      }
1936      // Query the trap state of this profile datum.
1937      int tstate0 = pdata->trap_state();
1938      if (!trap_state_has_reason(tstate0, per_bc_reason))
1939        maybe_prior_trap = false;
1940      if (!trap_state_is_recompiled(tstate0))
1941        maybe_prior_recompile = false;
1942
1943      // Update the trap state of this profile datum.
1944      int tstate1 = tstate0;
1945      // Record the reason.
1946      tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
1947      // Store the updated state on the MDO, for next time.
1948      if (tstate1 != tstate0)
1949        pdata->set_trap_state(tstate1);
1950    } else {
1951      if (LogCompilation && xtty != NULL) {
1952        ttyLocker ttyl;
1953        // Missing MDP?  Leave a small complaint in the log.
1954        xtty->elem("missing_mdp bci='%d'", trap_bci);
1955      }
1956    }
1957  }
1958
1959  // Return results:
1960  ret_this_trap_count = this_trap_count;
1961  ret_maybe_prior_trap = maybe_prior_trap;
1962  ret_maybe_prior_recompile = maybe_prior_recompile;
1963  return pdata;
1964}
1965
1966void
1967Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
1968  ResourceMark rm;
1969  // Ignored outputs:
1970  uint ignore_this_trap_count;
1971  bool ignore_maybe_prior_trap;
1972  bool ignore_maybe_prior_recompile;
1973  assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
1974  // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
1975  bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true);
1976  query_update_method_data(trap_mdo, trap_bci,
1977                           (DeoptReason)reason,
1978                           update_total_counts,
1979#if INCLUDE_JVMCI
1980                           false,
1981#endif
1982                           NULL,
1983                           ignore_this_trap_count,
1984                           ignore_maybe_prior_trap,
1985                           ignore_maybe_prior_recompile);
1986}
1987
1988Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
1989  if (TraceDeoptimization) {
1990    tty->print("Uncommon trap ");
1991  }
1992  // Still in Java; no safepoints can occur here.
1993  {
1994    // This enters VM and may safepoint
1995    uncommon_trap_inner(thread, trap_request);
1996  }
1997  return fetch_unroll_info_helper(thread);
1998}
1999
2000// Local derived constants.
2001// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2002const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
2003const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
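// Layout note: assuming DataLayout::trap_mask is a contiguous mask of low bits
// (which the arithmetic above relies on), DS_RECOMPILE_BIT is its single
// highest bit and DS_REASON_MASK covers the remaining low bits.  Illustrative
// example with an assumed mask value, not taken from DataLayout: if trap_mask
// were 0xFF, then DS_REASON_MASK == 0x7F and DS_RECOMPILE_BIT == 0x80, so a
// trap_state of (0x80 | Reason_null_check) would record a null_check reason
// with the recompile bit set.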
2004
2005//---------------------------trap_state_reason---------------------------------
2006Deoptimization::DeoptReason
2007Deoptimization::trap_state_reason(int trap_state) {
2008  // This assert provides the link between the width of DataLayout::trap_bits
2009  // and the encoding of "recorded" reasons.  It ensures there are enough
2010  // bits to store all needed reasons in the per-BCI MDO profile.
2011  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2012  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2013  trap_state -= recompile_bit;
2014  if (trap_state == DS_REASON_MASK) {
2015    return Reason_many;
2016  } else {
2017    assert((int)Reason_none == 0, "state=0 => Reason_none");
2018    return (DeoptReason)trap_state;
2019  }
2020}
2021//-------------------------trap_state_has_reason-------------------------------
2022int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2023  assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
2024  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2025  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2026  trap_state -= recompile_bit;
2027  if (trap_state == DS_REASON_MASK) {
2028    return -1;  // true, unspecifically (bottom of state lattice)
2029  } else if (trap_state == reason) {
2030    return 1;   // true, definitely
2031  } else if (trap_state == 0) {
2032    return 0;   // false, definitely (top of state lattice)
2033  } else {
2034    return 0;   // false, definitely
2035  }
2036}
2037//-------------------------trap_state_add_reason-------------------------------
2038int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
2039  assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
2040  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2041  trap_state -= recompile_bit;
2042  if (trap_state == DS_REASON_MASK) {
2043    return trap_state + recompile_bit;     // already at state lattice bottom
2044  } else if (trap_state == reason) {
2045    return trap_state + recompile_bit;     // the condition is already true
2046  } else if (trap_state == 0) {
2047    return reason + recompile_bit;          // no condition has yet been true
2048  } else {
2049    return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
2050  }
2051}
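// Illustrative transitions for trap_state_add_reason() (example reasons only):
// starting from 0, adding Reason_null_check records that reason; adding a
// different per-BCI reason afterwards falls to DS_REASON_MASK (reported as
// Reason_many, the lattice bottom); adding the same reason again leaves the
// state unchanged.  DS_RECOMPILE_BIT is stripped on entry and re-added on
// return, so it is preserved across every transition.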
2052//-----------------------trap_state_is_recompiled------------------------------
2053bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2054  return (trap_state & DS_RECOMPILE_BIT) != 0;
2055}
2056//-----------------------trap_state_set_recompiled-----------------------------
2057int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
2058  if (z)  return trap_state |  DS_RECOMPILE_BIT;
2059  else    return trap_state & ~DS_RECOMPILE_BIT;
2060}
2061//---------------------------format_trap_state---------------------------------
2062// This is used for debugging and diagnostics, including LogFile output.
2063const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2064                                              int trap_state) {
2065  DeoptReason reason      = trap_state_reason(trap_state);
2066  bool        recomp_flag = trap_state_is_recompiled(trap_state);
2067  // Re-encode the state from its decoded components.
2068  int decoded_state = 0;
2069  if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
2070    decoded_state = trap_state_add_reason(decoded_state, reason);
2071  if (recomp_flag)
2072    decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
2073  // If the state re-encodes properly, format it symbolically.
2074  // Because this routine is used for debugging and diagnostics,
2075  // be robust even if the state is a strange value.
2076  size_t len;
2077  if (decoded_state != trap_state) {
2078    // Random buggy state that doesn't decode??
2079    len = jio_snprintf(buf, buflen, "#%d", trap_state);
2080  } else {
2081    len = jio_snprintf(buf, buflen, "%s%s",
2082                       trap_reason_name(reason),
2083                       recomp_flag ? " recompiled" : "");
2084  }
2085  if (len >= buflen)
2086    buf[buflen-1] = '\0';
2087  return buf;
2088}
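// Example output (illustrative): a trap_state encoding Reason_class_check with
// the recompile bit set formats as "class_check recompiled"; a value that does
// not re-encode cleanly is printed raw, e.g. "#42".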
2089
2090
2091//--------------------------------statics--------------------------------------
2092const char* Deoptimization::_trap_reason_name[] = {
2093  // Note:  Keep this in sync. with enum DeoptReason.
2094  "none",
2095  "null_check",
2096  "null_assert" JVMCI_ONLY("_or_unreached0"),
2097  "range_check",
2098  "class_check",
2099  "array_check",
2100  "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
2101  "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
2102  "unloaded",
2103  "uninitialized",
2104  "unreached",
2105  "unhandled",
2106  "constraint",
2107  "div0_check",
2108  "age",
2109  "predicate",
2110  "loop_limit_check",
2111  "speculate_class_check",
2112  "speculate_null_check",
2113  "rtm_state_change",
2114  "unstable_if",
2115  "unstable_fused_if",
2116#if INCLUDE_JVMCI
2117  "aliasing",
2118  "transfer_to_interpreter",
2119  "not_compiled_exception_handler",
2120  "unresolved",
2121  "jsr_mismatch",
2122#endif
2123  "tenured"
2124};
2125const char* Deoptimization::_trap_action_name[] = {
2126  // Note:  Keep this in sync. with enum DeoptAction.
2127  "none",
2128  "maybe_recompile",
2129  "reinterpret",
2130  "make_not_entrant",
2131  "make_not_compilable"
2132};
2133
2134const char* Deoptimization::trap_reason_name(int reason) {
2135  // Check that every reason has a name
2136  STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
2137
2138  if (reason == Reason_many)  return "many";
2139  if ((uint)reason < Reason_LIMIT)
2140    return _trap_reason_name[reason];
2141  static char buf[20];
2142  sprintf(buf, "reason%d", reason);
2143  return buf;
2144}
2145const char* Deoptimization::trap_action_name(int action) {
2146  // Check that every action has a name
2147  STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
2148
2149  if ((uint)action < Action_LIMIT)
2150    return _trap_action_name[action];
2151  static char buf[20];
2152  sprintf(buf, "action%d", action);
2153  return buf;
2154}
2155
2156// This is used for debugging and diagnostics, including LogFile output.
2157const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
2158                                                int trap_request) {
2159  jint unloaded_class_index = trap_request_index(trap_request);
2160  const char* reason = trap_reason_name(trap_request_reason(trap_request));
2161  const char* action = trap_action_name(trap_request_action(trap_request));
2162#if INCLUDE_JVMCI
2163  int debug_id = trap_request_debug_id(trap_request);
2164#endif
2165  size_t len;
2166  if (unloaded_class_index < 0) {
2167    len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2168                       reason, action
2169#if INCLUDE_JVMCI
2170                       ,debug_id
2171#endif
2172                       );
2173  } else {
2174    len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2175                       reason, action, unloaded_class_index
2176#if INCLUDE_JVMCI
2177                       ,debug_id
2178#endif
2179                       );
2180  }
2181  if (len >= buflen)
2182    buf[buflen-1] = '\0';
2183  return buf;
2184}
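// Example output (illustrative values only): a request carrying an unloaded
// class index formats as  reason='unloaded' action='reinterpret' index='57'
// (with an additional debug_id='...' attribute in JVMCI builds); when the
// index is negative the index='...' attribute is omitted.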
2185
2186juint Deoptimization::_deoptimization_hist
2187        [Deoptimization::Reason_LIMIT]
2188    [1 + Deoptimization::Action_LIMIT]
2189        [Deoptimization::BC_CASE_LIMIT]
2190  = {0};
2191
2192enum {
2193  LSB_BITS = 8,
2194  LSB_MASK = right_n_bits(LSB_BITS)
2195};
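// Counter packing used by gather_statistics() below: the low LSB_BITS bits of
// a histogram cell remember which bytecode the cell is devoted to, and the
// high bits hold the occurrence count, which is why each hit adds
// (1 << LSB_BITS).  Illustrative example (bytecode chosen arbitrarily): a cell
// holding ((5 << LSB_BITS) | Bytecodes::_aaload) means five traps of that
// reason/action were attributed to an aaload bytecode.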
2196
2197void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2198                                       Bytecodes::Code bc) {
2199  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2200  assert(action >= 0 && action < Action_LIMIT, "oob");
2201  _deoptimization_hist[Reason_none][0][0] += 1;  // total
2202  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
2203  juint* cases = _deoptimization_hist[reason][1+action];
2204  juint* bc_counter_addr = NULL;
2205  juint  bc_counter      = 0;
2206  // Look for an unused counter, or an exact match to this BC.
2207  if (bc != Bytecodes::_illegal) {
2208    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2209      juint* counter_addr = &cases[bc_case];
2210      juint  counter = *counter_addr;
2211      if ((counter == 0 && bc_counter_addr == NULL)
2212          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2213        // this counter is either free or is already devoted to this BC
2214        bc_counter_addr = counter_addr;
2215        bc_counter = counter | bc;
2216      }
2217    }
2218  }
2219  if (bc_counter_addr == NULL) {
2220    // Overflow, or no given bytecode.
2221    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
2222    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
2223  }
2224  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2225}
2226
2227jint Deoptimization::total_deoptimization_count() {
2228  return _deoptimization_hist[Reason_none][0][0];
2229}
2230
2231jint Deoptimization::deoptimization_count(DeoptReason reason) {
2232  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2233  return _deoptimization_hist[reason][0][0];
2234}
2235
2236void Deoptimization::print_statistics() {
2237  juint total = total_deoptimization_count();
2238  juint account = total;
2239  if (total != 0) {
2240    ttyLocker ttyl;
2241    if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
2242    tty->print_cr("Deoptimization traps recorded:");
2243    #define PRINT_STAT_LINE(name, r) \
2244      tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
2245    PRINT_STAT_LINE("total", total);
2246    // For each non-zero entry in the histogram, print the reason,
2247    // the action, and (if specifically known) the type of bytecode.
2248    for (int reason = 0; reason < Reason_LIMIT; reason++) {
2249      for (int action = 0; action < Action_LIMIT; action++) {
2250        juint* cases = _deoptimization_hist[reason][1+action];
2251        for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2252          juint counter = cases[bc_case];
2253          if (counter != 0) {
2254            char name[1*K];
2255            Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2256            if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2257              bc = Bytecodes::_illegal;
2258            sprintf(name, "%s/%s/%s",
2259                    trap_reason_name(reason),
2260                    trap_action_name(action),
2261                    Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2262            juint r = counter >> LSB_BITS;
2263            tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2264            account -= r;
2265          }
2266        }
2267      }
2268    }
2269    if (account != 0) {
2270      PRINT_STAT_LINE("unaccounted", account);
2271    }
2272    #undef PRINT_STAT_LINE
2273    if (xtty != NULL)  xtty->tail("statistics");
2274  }
2275}
2276#else // COMPILER2 || SHARK || INCLUDE_JVMCI
2277
2278
2279// Stubs for a C1-only system.
2280bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2281  return false;
2282}
2283
2284const char* Deoptimization::trap_reason_name(int reason) {
2285  return "unknown";
2286}
2287
2288void Deoptimization::print_statistics() {
2289  // no output
2290}
2291
2292void
2293Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2294  // no update
2295}
2296
2297int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2298  return 0;
2299}
2300
2301void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2302                                       Bytecodes::Code bc) {
2303  // no update
2304}
2305
2306const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2307                                              int trap_state) {
2308  jio_snprintf(buf, buflen, "#%d", trap_state);
2309  return buf;
2310}
2311
2312#endif // COMPILER2 || SHARK || INCLUDE_JVMCI
2313