// deoptimization.cpp revision 3890:d2f8c38e543d
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef COMPILER2
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif
#endif

bool DeoptimizationMarker::_is_active = false;

Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                         int  caller_adjustment,
                                         int  caller_actual_parameters,
                                         int  number_of_frames,
                                         intptr_t* frame_sizes,
                                         address* frame_pcs,
                                         BasicType return_type) {
  _size_of_deoptimized_frame = size_of_deoptimized_frame;
  _caller_adjustment         = caller_adjustment;
  _caller_actual_parameters  = caller_actual_parameters;
  _number_of_frames          = number_of_frames;
  _frame_sizes               = frame_sizes;
  _frame_pcs                 = frame_pcs;
  _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
  _return_type               = return_type;
  _initial_info              = 0;
  // PD (x86 only)
  _counter_temp              = 0;
  _unpack_kind               = 0;
  _sender_sp_temp            = 0;

  _total_frame_sizes         = size_of_frames();
}


Deoptimization::UnrollBlock::~UnrollBlock() {
  FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes, mtCompiler);
  FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs, mtCompiler);
  FREE_C_HEAP_ARRAY(intptr_t, _register_block, mtCompiler);
}

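// Note: _register_block holds two intptr_t slots per register (hence the
// RegisterMap::reg_count * 2 allocation in the constructor above);
// value_addr_at() below hands out the address of the first slot of a
// register's pair, presumably leaving the second slot for the upper half
// of a double-word value.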
intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
  assert(register_number < RegisterMap::reg_count, "checking register number");
  return &_register_block[register_number * 2];
}



int Deoptimization::UnrollBlock::size_of_frames() const {
  // Account first for the adjustment of the initial frame
  int result = _caller_adjustment;
  for (int index = 0; index < number_of_frames(); index++) {
    result += frame_sizes()[index];
  }
  return result;
}
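
// For example (illustrative numbers only): with _caller_adjustment == 16 and
// frame_sizes() == { 88, 120 }, size_of_frames() returns 224. All values here
// are byte counts, since the caller stores sizes already scaled by
// BytesPerWord (see fetch_unroll_info_helper below).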


void Deoptimization::UnrollBlock::print() {
  ttyLocker ttyl;
  tty->print_cr("UnrollBlock");
  tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
  tty->print(   "  frame_sizes: ");
  for (int index = 0; index < number_of_frames(); index++) {
    tty->print("%d ", frame_sizes()[index]);
  }
  tty->cr();
}


// In order to make fetch_unroll_info work properly with escape
// analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
// ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
// of previously eliminated objects occurs in realloc_objects, which is
// called from the method fetch_unroll_info_helper below.
JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.

  // fetch_unroll_info() is called at the beginning of the deoptimization
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  return fetch_unroll_info_helper(thread);
JRT_END

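// Informal overview of the sequence: fetch_unroll_info() (above) builds a
// vframeArray and an UnrollBlock describing the interpreter frames to create;
// the deopt blob then pushes skeletal interpreter frames using that
// information, and unpack_frames() (below) fills them in and, under
// VerifyStack, checks the result.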

// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {

  // Note: there is a safepoint safety issue here. No matter whether we enter
  // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
  // the vframeArray is created.
  //

  // Allocate our special deoptimization ResourceMark
  DeoptResourceMark* dmark = new DeoptResourceMark(thread);
  assert(thread->deopt_mark() == NULL, "Pending deopt!");
  thread->set_deopt_mark(dmark);

  frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
  RegisterMap map(thread, true);
  RegisterMap dummy_map(thread, false);
  // Now get the deoptee with a valid map
  frame deoptee = stub_frame.sender(&map);
  // Set the deoptee nmethod
  assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
  thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());

  if (VerifyStack) {
    thread->validate_frame_layout();
  }

  // Create a growable array of VFrames where each VFrame represents an inlined
  // Java frame.  This storage is allocated with the usual system arena.
  assert(deoptee.is_compiled_frame(), "Wrong frame type");
  GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
  vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
  while (!vf->is_top()) {
    assert(vf->is_compiled_frame(), "Wrong frame type");
    chunk->push(compiledVFrame::cast(vf));
    vf = vf->sender();
  }
  assert(vf->is_compiled_frame(), "Wrong frame type");
  chunk->push(compiledVFrame::cast(vf));
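  // chunk is ordered innermost-first: chunk->at(0) is the youngest (most
  // deeply inlined) scope of the deoptee and chunk->at(chunk->length() - 1)
  // is the outermost scope of the compiled frame.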

#ifdef COMPILER2
  // Reallocate the non-escaping objects and restore their fields. Then
  // relock objects if synchronization on them was eliminated.
  if (DoEscapeAnalysis || EliminateNestedLocks) {
    if (EliminateAllocations) {
      assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
      GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();

      // The flag return_oop() indicates call sites which return an oop
      // in compiled code. Such sites include Java method calls,
      // runtime calls (for example, those used to allocate new objects/arrays
      // on the slow code path) and any other calls generated in compiled code.
      // This information cannot reliably be recovered here just by analyzing
      // the bytecode of the deoptimized frames, which is why the flag
      // is set during method compilation (see Compile::Process_OopMap_Node()).
      bool save_oop_result = chunk->at(0)->scope()->return_oop();
      Handle return_value;
      if (save_oop_result) {
        // Reallocation may trigger GC. If deoptimization happened on return from
        // a call which returns an oop, we need to save it since it is not in the oopmap.
        oop result = deoptee.saved_oop_result(&map);
        assert(result == NULL || result->is_oop(), "must be oop");
        return_value = Handle(thread, result);
        assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
        }
      }
      bool reallocated = false;
      if (objects != NULL) {
        JRT_BLOCK
          reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
        JRT_END
      }
      if (reallocated) {
        reassign_fields(&deoptee, &map, objects);
#ifndef PRODUCT
        if (TraceDeoptimization) {
          ttyLocker ttyl;
          tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
          print_objects(objects);
        }
#endif
      }
      if (save_oop_result) {
        // Restore result.
        deoptee.set_saved_oop_result(&map, return_value());
      }
    }
    if (EliminateLocks) {
#ifndef PRODUCT
      bool first = true;
#endif
      for (int i = 0; i < chunk->length(); i++) {
        compiledVFrame* cvf = chunk->at(i);
        assert (cvf->scope() != NULL,"expect only compiled java frames");
        GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
        if (monitors->is_nonempty()) {
          relock_objects(monitors, thread);
#ifndef PRODUCT
          if (TraceDeoptimization) {
            ttyLocker ttyl;
            for (int j = 0; j < monitors->length(); j++) {
              MonitorInfo* mi = monitors->at(j);
              if (mi->eliminated()) {
                if (first) {
                  first = false;
                  tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                }
                tty->print_cr("     object <" INTPTR_FORMAT "> locked", mi->owner());
              }
            }
          }
#endif
        }
      }
    }
  }
#endif // COMPILER2
  // Ensure that no safepoint is taken after pointers have been stored
  // in fields of rematerialized objects.  If a safepoint occurs from here on
  // out the Java state residing in the vframeArray will be missed.
  No_Safepoint_Verifier no_safepoint;

  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);

  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
  thread->set_vframe_array_head(array);

  // Now that the vframeArray has been created, if we have any deferred local
  // writes added by JVMTI then we can free up that structure, as the data is
  // now in the vframeArray.

  if (thread->deferred_locals() != NULL) {
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    int i = 0;
    do {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      if (list->at(i)->id() == array->original().id()) {
        jvmtiDeferredLocalVariableSet* dlv = list->at(i);
        list->remove_at(i);
        // individual jvmtiDeferredLocalVariableSet are CHeapObj's
        delete dlv;
      } else {
        i++;
      }
    } while ( i < list->length() );
    if (list->length() == 0) {
      thread->set_deferred_locals(NULL);
      // free the list and elements back to C heap.
      delete list;
    }

  }

#ifndef SHARK
  // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
  CodeBlob* cb = stub_frame.cb();
  // Verify we have the right vframeArray
  assert(cb->frame_size() >= 0, "Unexpected frame size");
  intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();

  // If the deopt call site is a MethodHandle invoke call site we have
  // to adjust the unpack_sp.
  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
    unpack_sp = deoptee.unextended_sp();

#ifdef ASSERT
  assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
#endif
#else
  intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
#endif // !SHARK

  // This is a guarantee instead of an assert because if vframe doesn't match
  // we will unpack the wrong deoptimized frame and wind up in strange places
  // where it will be very difficult to figure out what went wrong. Better
  // to die an early death here than some very obscure death later when the
  // trail is cold.
  // Note: on ia64 this guarantee can be fooled by frames with no memory stack
  // in that it will fail to detect a problem when there is one. This needs
  // more work in tiger timeframe.
  guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");

  int number_of_frames = array->frames();

  // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
  // virtual activation, which is the reverse of the elements in the vframes array.
  intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
  // +1 because we always have an interpreter return address for the final slot.
  address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
  int popframe_extra_args = 0;
  // Create an interpreter return address for the stub to use as its return
  // address so the skeletal frames are perfectly walkable
  frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);

  // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
  // activation be put back on the expression stack of the caller for reexecution
  if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
  }

  // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
  // than simply use array->sender.pc(). This requires us to walk the current set of frames.
  //
  frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
  deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller

  // It's possible that the number of parameters at the call site is
  // different than the number of arguments in the callee when method
  // handles are used.  If the caller is interpreted, get the real
  // value so that the proper amount of space can be added to its
  // frame.
  bool caller_was_method_handle = false;
  if (deopt_sender.is_interpreted_frame()) {
    methodHandle method = deopt_sender.interpreter_frame_method();
    Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
    if (cur.is_invokedynamic() || cur.is_invokehandle()) {
      // Method handle invokes may involve fairly arbitrary chains of
      // calls so it's impossible to know how much actual space the
      // caller has for locals.
      caller_was_method_handle = true;
    }
  }

  //
  // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
  // frame_sizes/frame_pcs[1] next oldest frame (int)
  // frame_sizes/frame_pcs[n] youngest frame (int)
  //
  // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  // owns the space for the return address to its caller).  Confusing ain't it.
  //
  // The vframe array can address vframes with indices running from
  // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
  // When we create the skeletal frames we need the oldest frame to be in the zero slot
  // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
  // so things look a little strange in this loop.
  //
  int callee_parameters = 0;
  int callee_locals = 0;
  for (int index = 0; index < array->frames(); index++ ) {
    // frame[number_of_frames - 1 ] = on_stack_size(youngest)
    // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
    // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
    int caller_parms = callee_parameters;
    if ((index == array->frames() - 1) && caller_was_method_handle) {
      caller_parms = 0;
    }
    frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(caller_parms,
                                                                                                    callee_parameters,
                                                                                                    callee_locals,
                                                                                                    index == 0,
                                                                                                    popframe_extra_args);
    // This pc doesn't have to be perfect, just good enough to identify the frame
    // as interpreted so the skeleton frame will be walkable.
    // The correct pc will be set when the skeleton frame is completely filled out;
    // the final pc we store in the loop is wrong and will be overwritten below.
    frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;

    callee_parameters = array->element(index)->method()->size_of_parameters();
    callee_locals = array->element(index)->method()->max_locals();
    popframe_extra_args = 0;
  }
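
  // Worked example (illustrative): with three virtual frames A(root) -> B -> C,
  // C being the youngest (so array index 0 is C), the loop above fills
  //   frame_sizes[2] = size(C), frame_sizes[1] = size(B), frame_sizes[0] = size(A)
  // so slot 0 holds the oldest frame, as the unpacking assembly expects.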

  // Compute whether the root vframe returns a float or double value.
  BasicType return_type;
  {
    HandleMark hm;
    methodHandle method(thread, array->element(0)->method());
    Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
    return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
  }

  // Compute information for handling adapters and adjusting the frame size of the caller.
  int caller_adjustment = 0;

  // Compute the amount the oldest interpreter frame will have to adjust
  // its caller's stack by. If the caller is a compiled frame then
  // we pretend that the callee has no parameters so that the
  // extension counts for the full amount of locals and not just
  // locals-parms. This is because without a c2i adapter the parm
  // area as created by the compiled frame will not be usable by
  // the interpreter. (Depending on the calling convention there
  // may not even be enough space).

  // QQQ I'd rather see this pushed down into last_frame_adjust
  // and have it take the sender (aka caller).

  if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
    caller_adjustment = last_frame_adjust(0, callee_locals);
  } else if (callee_locals > callee_parameters) {
    // The caller frame may need extending to accommodate
    // non-parameter locals of the first unpacked interpreted frame.
    // Compute that adjustment.
    caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
  }
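
  // For instance (assuming a platform where last_frame_adjust(parms, locals)
  // is simply locals - parms): an interpreted caller invoking a callee with
  // 2 parameters and 5 locals yields caller_adjustment == 3, the extra slots
  // for the callee's non-parameter locals; a compiled caller passes parms == 0
  // and pays for all 5, since without a c2i adapter its outgoing parameter
  // area cannot be reused by the interpreter.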

  // If the sender is deoptimized then we must retrieve the address of the handler
  // since the frame will "magically" show the original pc before the deopt
  // and we'd undo the deopt.

  frame_pcs[0] = deopt_sender.raw_pc();

#ifndef SHARK
  assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
#endif // SHARK

  UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
                                      caller_adjustment * BytesPerWord,
                                      caller_was_method_handle ? 0 : callee_parameters,
                                      number_of_frames,
                                      frame_sizes,
                                      frame_pcs,
                                      return_type);
  // On some platforms, we need a way to pass some platform dependent
  // information to the unpacking code so the skeletal frames come out
  // correct (initial fp value, unextended sp, ...)
  info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());

  if (array->frames() > 1) {
    if (VerifyStack && TraceDeoptimization) {
      ttyLocker ttyl;
      tty->print_cr("Deoptimizing method containing inlining");
    }
  }

  array->set_unroll_block(info);
  return info;
}

// Called to clean up deoptimization data structures, both in the normal case
// after unpacking to the stack and when a stack overflow error occurs
void Deoptimization::cleanup_deopt_info(JavaThread *thread,
                                        vframeArray *array) {

  // Get array if coming from exception
  if (array == NULL) {
    array = thread->vframe_array_head();
  }
  thread->set_vframe_array_head(NULL);

  // Free the previous UnrollBlock
  vframeArray* old_array = thread->vframe_array_last();
  thread->set_vframe_array_last(array);

  if (old_array != NULL) {
    UnrollBlock* old_info = old_array->unroll_block();
    old_array->set_unroll_block(NULL);
    delete old_info;
    delete old_array;
  }

  // Deallocate any resources created in this routine and any ResourceObjs allocated
  // inside the vframeArray (StackValueCollections)

  delete thread->deopt_mark();
  thread->set_deopt_mark(NULL);
  thread->set_deopt_nmethod(NULL);


  if (JvmtiExport::can_pop_frame()) {
#ifndef CC_INTERP
    // Regardless of whether we entered this routine with the pending
    // popframe condition bit set, we should always clear it now
    thread->clear_popframe_condition();
#else
    // C++ interpreter will clear has_pending_popframe when it enters
    // with method_resume. For deopt_resume2 we clear it now.
    if (thread->popframe_forcing_deopt_reexecution())
        thread->clear_popframe_condition();
#endif /* CC_INTERP */
  }

  // unpack_frames() is called at the end of the deoptimization handler
  // and (in C2) at the end of the uncommon trap handler. Note this fact
  // so that an asynchronous stack walker can work again. This counter is
  // incremented at the beginning of fetch_unroll_info() and (in C2) at
  // the beginning of uncommon_trap().
  thread->dec_in_deopt_handler();
}


// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))

  // We are already active in the special DeoptResourceMark; any ResourceObj's we
  // allocate will be freed at the end of the routine.

  // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
  // but makes the entry a little slower. There is however a little dance we have to
  // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro.
  ResetNoHandleMark rnhm; // No-op in release/product versions
  HandleMark hm;

  frame stub_frame = thread->last_frame();

  // Since the frame to unpack is the top frame of this thread, the vframe_array_head
  // must point to the vframeArray for the unpack frame.
  vframeArray* array = thread->vframe_array_head();

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
  }
#endif
  Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
              stub_frame.pc(), stub_frame.sp(), exec_mode);

  UnrollBlock* info = array->unroll_block();

  // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
  array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());

  BasicType bt = info->return_type();

  // If we have an exception pending, claim that the return type is an oop
  // so the deopt_blob does not overwrite the exception_oop.

  if (exec_mode == Unpack_exception)
    bt = T_OBJECT;

  // Cleanup thread deopt data
  cleanup_deopt_info(thread, array);

#ifndef PRODUCT
  if (VerifyStack) {
    ResourceMark res_mark;

    thread->validate_frame_layout();

    // Verify that the just-unpacked frames match the interpreter's
    // notions of expression stack and locals
    vframeArray* cur_array = thread->vframe_array_last();
    RegisterMap rm(thread, false);
    rm.set_include_argument_oops(false);
    bool is_top_frame = true;
    int callee_size_of_parameters = 0;
    int callee_max_locals = 0;
    for (int i = 0; i < cur_array->frames(); i++) {
      vframeArrayElement* el = cur_array->element(i);
      frame* iframe = el->iframe();
      guarantee(iframe->is_interpreted_frame(), "Wrong frame type");

      // Get the oop map for this bci
      InterpreterOopMap mask;
      int cur_invoke_parameter_size = 0;
      bool try_next_mask = false;
      int next_mask_expression_stack_size = -1;
      int top_frame_expression_stack_adjustment = 0;
      methodHandle mh(thread, iframe->interpreter_frame_method());
      OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
      BytecodeStream str(mh);
      str.set_start(iframe->interpreter_frame_bci());
      int max_bci = mh->code_size();
      // Get to the next bytecode if possible
      assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
      // Check to see if we can grab the number of outgoing arguments
      // at an uncommon trap for an invoke (where the compiler
      // generates debug info before the invoke has executed)
      Bytecodes::Code cur_code = str.next();
      if (cur_code == Bytecodes::_invokevirtual ||
          cur_code == Bytecodes::_invokespecial ||
          cur_code == Bytecodes::_invokestatic  ||
          cur_code == Bytecodes::_invokeinterface) {
        Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
        Symbol* signature = invoke.signature();
        ArgumentSizeComputer asc(signature);
        cur_invoke_parameter_size = asc.size();
        if (cur_code != Bytecodes::_invokestatic) {
          // Add in receiver
          ++cur_invoke_parameter_size;
        }
      }
      if (str.bci() < max_bci) {
        Bytecodes::Code bc = str.next();
        if (bc >= 0) {
          // The interpreter oop map generator reports results before
          // the current bytecode has executed except in the case of
          // calls. It seems to be hard to tell whether the compiler
          // has emitted debug information matching the "state before"
          // a given bytecode or the state after, so we try both
          switch (cur_code) {
            case Bytecodes::_invokevirtual:
            case Bytecodes::_invokespecial:
            case Bytecodes::_invokestatic:
            case Bytecodes::_invokeinterface:
            case Bytecodes::_athrow:
              break;
            default: {
              InterpreterOopMap next_mask;
              OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
              next_mask_expression_stack_size = next_mask.expression_stack_size();
              // Need to subtract off the size of the result type of
              // the bytecode because this is not described in the
              // debug info but returned to the interpreter in the TOS
              // caching register
              BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
              if (bytecode_result_type != T_ILLEGAL) {
                top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
              }
              assert(top_frame_expression_stack_adjustment >= 0, "");
              try_next_mask = true;
              break;
            }
          }
        }
      }

      // Verify stack depth and oops in frame
      // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
      if (!(
            /* SPARC */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
            /* x86 */
            (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
            (try_next_mask &&
             (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
                                                                    top_frame_expression_stack_adjustment))) ||
            (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
            (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
            )) {
        ttyLocker ttyl;

        // Print out some information that will help us debug the problem
        tty->print_cr("Wrong number of expression stack elements during deoptimization");
        tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
        tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
                      iframe->interpreter_frame_expression_stack_size());
        tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
        tty->print_cr("  try_next_mask = %d", try_next_mask);
        tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
        tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
        tty->print_cr("  callee_max_locals = %d", callee_max_locals);
        tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
        tty->print_cr("  exec_mode = %d", exec_mode);
        tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
        tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
        tty->print_cr("  Interpreted frames:");
        for (int k = 0; k < cur_array->frames(); k++) {
          vframeArrayElement* el = cur_array->element(k);
          tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
        }
        cur_array->print_on_2(tty);
        guarantee(false, "wrong number of expression stack elements during deopt");
      }
      VerifyOopClosure verify;
      iframe->oops_interpreted_do(&verify, NULL, &rm, false);
      callee_size_of_parameters = mh->size_of_parameters();
      callee_max_locals = mh->max_locals();
      is_top_frame = false;
    }
  }
#endif /* !PRODUCT */


  return bt;
JRT_END


int Deoptimization::deoptimize_dependents() {
  Threads::deoptimized_wrt_marked_nmethods();
  return 0;
}


#ifdef COMPILER2
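// Reallocate, on the Java heap, every scalar-replaced object described by
// the ScopeValues of the innermost scope, so that the interpreter frames
// about to be built can see real objects. Called (under a JRT_BLOCK, since
// allocation may safepoint and GC) from fetch_unroll_info_helper above.
// Any pending exception is stashed and restored so an allocation failure
// here does not clobber it.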
bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
  Handle pending_exception(thread->pending_exception());
  const char* exception_file = thread->exception_file();
  int exception_line = thread->exception_line();
  thread->clear_pending_exception();

  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);

    KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
    oop obj = NULL;

    if (k->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(k());
      obj = ik->allocate_instance(CHECK_(false));
    } else if (k->oop_is_typeArray()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k());
      assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
      int len = sv->field_size() / type2size[ak->element_type()];
      obj = ak->allocate(len, CHECK_(false));
    } else if (k->oop_is_objArray()) {
      ObjArrayKlass* ak = ObjArrayKlass::cast(k());
      obj = ak->allocate(sv->field_size(), CHECK_(false));
    }

    assert(obj != NULL, "allocation failed");
    assert(sv->value().is_null(), "redundant reallocation");
    sv->set_value(obj);
  }

  if (pending_exception.not_null()) {
    thread->set_pending_exception(pending_exception(), exception_file, exception_line);
  }

  return true;
}

// This assumes that the fields are stored in ObjectValue in the same order
// they are yielded by do_nonstatic_fields.
class FieldReassigner: public FieldClosure {
  frame* _fr;
  RegisterMap* _reg_map;
  ObjectValue* _sv;
  InstanceKlass* _ik;
  oop _obj;

  int _i;
public:
  FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
    _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}

  int i() const { return _i; }


  void do_field(fieldDescriptor* fd) {
    intptr_t val;
    StackValue* value =
      StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
    int offset = fd->offset();
    switch (fd->field_type()) {
    case T_OBJECT: case T_ARRAY:
      assert(value->type() == T_OBJECT, "Agreement.");
      _obj->obj_field_put(offset, value->get_obj()());
      break;

    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      _obj->long_field_put(offset, res);
      break;
    }
    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->int_field_put(offset, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->short_field_put(offset, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      _obj->bool_field_put(offset, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    _i++;
  }
};
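
// Usage note: FieldReassigner is applied by reassign_fields() below via
// InstanceKlass::do_nonstatic_fields(), which visits fields in the same
// order the compiler recorded their values in the ObjectValue; _i tracks
// the next ScopeValue index, advancing an extra slot for longs/doubles.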

// restore elements of an eliminated type array
void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
  int index = 0;
  intptr_t val;

  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    switch(type) {
    case T_LONG: case T_DOUBLE: {
      assert(value->type() == T_INT, "Agreement.");
      StackValue* low =
        StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
#ifdef _LP64
      jlong res = (jlong)low->get_int();
#else
#ifdef SPARC
      // For SPARC we have to swap high and low words.
      jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
#else
      jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
#endif //SPARC
#endif
      obj->long_at_put(index, res);
      break;
    }

    // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
    case T_INT: case T_FLOAT: // 4 bytes.
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->int_at_put(index, (jint)*((jint*)&val));
      break;

    case T_SHORT: case T_CHAR: // 2 bytes
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->short_at_put(index, (jshort)*((jint*)&val));
      break;

    case T_BOOLEAN: case T_BYTE: // 1 byte
      assert(value->type() == T_INT, "Agreement.");
      val = value->get_int();
      obj->bool_at_put(index, (jboolean)*((jint*)&val));
      break;

    default:
      ShouldNotReachHere();
    }
    index++;
  }
}


// restore fields of an eliminated object array
void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
  for (int i = 0; i < sv->field_size(); i++) {
    StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
    assert(value->type() == T_OBJECT, "object element expected");
    obj->obj_at_put(i, value->get_obj()());
  }
}


// restore fields of all eliminated objects and arrays
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
    Handle obj = sv->value();
    assert(obj.not_null(), "reallocation was missed");

    if (k->oop_is_instance()) {
      InstanceKlass* ik = InstanceKlass::cast(k());
      FieldReassigner reassign(fr, reg_map, sv, obj());
      ik->do_nonstatic_fields(&reassign);
    } else if (k->oop_is_typeArray()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k());
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->oop_is_objArray()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
}


// relock objects for which synchronization was eliminated
void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(mon_info->owner() != NULL, "reallocation was missed");
      Handle obj = Handle(mon_info->owner());
      markOop mark = obj->mark();
      if (UseBiasedLocking && mark->has_bias_pattern()) {
        // Newly allocated objects may have the mark set to anonymously biased.
        // Also the deoptimized method may have called methods with synchronization
        // where the thread-local object is bias-locked to the current thread.
        assert(mark->is_biased_anonymously() ||
               mark->biased_locker() == thread, "should be locked to current thread");
        // Reset mark word to unbiased prototype.
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        obj->set_mark(unbiased_prototype);
      }
      BasicLock* lock = mon_info->lock();
      ObjectSynchronizer::slow_enter(obj, lock, thread);
    }
    assert(mon_info->owner()->is_locked(), "object must be locked now");
  }
}


#ifndef PRODUCT
// print information about reallocated objects
void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
  fieldDescriptor fd;

  for (int i = 0; i < objects->length(); i++) {
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
    Handle obj = sv->value();

    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
    k->print_value();
    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
    tty->cr();

    if (Verbose) {
      k->oop_print_on(obj(), tty);
    }
  }
}
#endif
#endif // COMPILER2

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
  Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
    fr.print_on(tty);
    tty->print_cr("     Virtual frames (innermost first):");
    for (int index = 0; index < chunk->length(); index++) {
      compiledVFrame* vf = chunk->at(index);
      tty->print("       %2d - ", index);
      vf->print_value();
      int bci = chunk->at(index)->raw_bci();
      const char* code_name;
      if (bci == SynchronizationEntryBCI) {
        code_name = "sync entry";
      } else {
        Bytecodes::Code code = vf->method()->code_at(bci);
        code_name = Bytecodes::name(code);
      }
      tty->print(" - %s", code_name);
      tty->print_cr(" @ bci %d ", bci);
      if (Verbose) {
        vf->print();
        tty->cr();
      }
    }
  }
#endif

  // Register map for next frame (used for stack crawl).  We capture
  // the state of the deopt'ing frame's caller.  Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);

  // Compare the vframeArray to the collected vframes
  assert(array->structural_compare(thread, chunk), "just checking");

#ifndef PRODUCT
  if (TraceDeoptimization) {
    ttyLocker ttyl;
    tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
  }
#endif // PRODUCT

  return array;
}


static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (!mon_info->eliminated() && mon_info->owner() != NULL) {
      objects_to_revoke->append(Handle(mon_info->owner()));
    }
  }
}


void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  if (!UseBiasedLocking) {
    return;
  }

  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();

  // Unfortunately we don't have a RegisterMap available in most of
  // the places we want to call this routine so we need to walk the
  // stack again to update the register map.
  if (map == NULL || !map->update_map()) {
    StackFrameStream sfs(thread, true);
    bool found = false;
    while (!found && !sfs.is_done()) {
      frame* cur = sfs.current();
      sfs.next();
      found = cur->id() == fr.id();
    }
    assert(found, "frame to be deoptimized not found on target thread's stack");
    map = sfs.register_map();
  }

  vframe* vf = vframe::new_vframe(&fr, map, thread);
  compiledVFrame* cvf = compiledVFrame::cast(vf);
  // Revoke monitors' biases in all scopes
  while (!cvf->is_top()) {
    collect_monitors(cvf, objects_to_revoke);
    cvf = compiledVFrame::cast(cvf->sender());
  }
  collect_monitors(cvf, objects_to_revoke);

  if (SafepointSynchronize::is_at_safepoint()) {
    BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  } else {
    BiasedLocking::revoke(objects_to_revoke);
  }
}


void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  if (!UseBiasedLocking) {
    return;
  }

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
    if (jt->has_last_Java_frame()) {
      StackFrameStream sfs(jt, true);
      while (!sfs.is_done()) {
        frame* cur = sfs.current();
        if (cb->contains(cur->pc())) {
          vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
          compiledVFrame* cvf = compiledVFrame::cast(vf);
          // Revoke monitors' biases in all scopes
          while (!cvf->is_top()) {
            collect_monitors(cvf, objects_to_revoke);
            cvf = compiledVFrame::cast(cvf->sender());
          }
          collect_monitors(cvf, objects_to_revoke);
        }
        sfs.next();
      }
    }
  }
  BiasedLocking::revoke_at_safepoint(objects_to_revoke);
}


void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  assert(fr.can_be_deoptimized(), "checking frame type");

  gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);

  // Patch the nmethod so that when execution returns to it we will
  // deopt the execution state and return to the interpreter.
  fr.deoptimize(thread);
}

void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  // Deoptimize only if the frame comes from compiled code.
  // Do not deoptimize a frame which has already been patched
  // during the execution of the loops below.
  if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
    return;
  }
  ResourceMark rm;
  DeoptimizationMarker dm;
  if (UseBiasedLocking) {
    revoke_biases_of_monitors(thread, fr, map);
  }
  deoptimize_single_frame(thread, fr);
}


void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id) {
  assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
         "can only deoptimize other thread at a safepoint");
  // Compute frame and register map based on thread and sp.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame fr = thread->last_frame();
  while (fr.id() != id) {
    fr = fr.sender(&reg_map);
  }
  deoptimize(thread, fr, &reg_map);
}


void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  if (thread == Thread::current()) {
    Deoptimization::deoptimize_frame_internal(thread, id);
  } else {
    VM_DeoptimizeFrame deopt(thread, id);
    VMThread::execute(&deopt);
  }
}
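
// Usage sketch (illustrative only): to force the frame identified by its
// stack id back into the interpreter, e.g. from a JVMTI or debugger path:
//
//   RegisterMap map(thread, false);
//   frame top = thread->last_frame();
//   Deoptimization::deoptimize_frame(thread, top.sender(&map).id());
//
// If thread is not the current thread, the VM_DeoptimizeFrame VM operation
// above performs the patching at a safepoint.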


// JVMTI PopFrame support
JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
{
  thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
}
JRT_END


#if defined(COMPILER2) || defined(SHARK)
void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
  // In case of an unresolved klass entry, load the class.
  if (constant_pool->tag_at(index).is_unresolved_klass()) {
    Klass* tk = constant_pool->klass_at(index, CHECK);
    return;
  }

  if (!constant_pool->tag_at(index).is_symbol()) return;

  Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
  Symbol*  symbol  = constant_pool->symbol_at(index);

  // class name?
  if (symbol->byte_at(0) != '(') {
    Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
    return;
  }

  // then it must be a signature!
  ResourceMark rm(THREAD);
  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
    if (ss.is_object()) {
      Symbol* class_name = ss.as_symbol(CHECK);
      Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
    }
  }
}
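
// For example, given the method signature "(Ljava/lang/String;I)V" the
// SignatureStream loop above resolves java/lang/String (the only object
// type mentioned) against the pool holder's loader and protection domain.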


void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
  EXCEPTION_MARK;
  load_class_by_index(constant_pool, index, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // An exception happened during classloading. We ignore it here, since it
    // will be rethrown when the current activation is deoptimized and the
    // interpreter re-executes the bytecode.
    CLEAR_PENDING_EXCEPTION;
  }
}

JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
  HandleMark hm;

  // uncommon_trap() is called at the beginning of the uncommon trap
  // handler. Note this fact before we start generating temporary frames
  // that can confuse an asynchronous stack walker. This counter is
  // decremented at the end of unpack_frames().
  thread->inc_in_deopt_handler();

  // We need to update the map if we have biased locking.
  RegisterMap reg_map(thread, UseBiasedLocking);
  frame stub_frame = thread->last_frame();
  frame fr = stub_frame.sender(&reg_map);
  // Make sure the calling nmethod is not getting deoptimized and removed
  // before we are done with it.
  nmethodLocker nl(fr.pc());

  // Log a message
  Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT,
              trap_request, fr.pc());

  {
    ResourceMark rm;

    // Revoke biases of any monitors in the frame to ensure we can migrate them
    revoke_biases_of_monitors(thread, fr, &reg_map);

    DeoptReason reason = trap_request_reason(trap_request);
    DeoptAction action = trap_request_action(trap_request);
    jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
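    // trap_request is a packed word: the trap_request_reason/_action/_index
    // accessors above decode the DeoptReason, the DeoptAction and (if any)
    // the constant pool index of the unloaded class that triggered the trap.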
1257
1258    vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
1259    compiledVFrame* cvf = compiledVFrame::cast(vf);
1260
1261    nmethod* nm = cvf->code();
1262
1263    ScopeDesc*      trap_scope  = cvf->scope();
1264    methodHandle    trap_method = trap_scope->method();
1265    int             trap_bci    = trap_scope->bci();
1266    Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1267
1268    // Record this event in the histogram.
1269    gather_statistics(reason, action, trap_bc);
1270
1271    // Ensure that we can record deopt. history:
1272    bool create_if_missing = ProfileTraps;
1273
1274    MethodData* trap_mdo =
1275      get_method_data(thread, trap_method, create_if_missing);
1276
1277    // Log a message
1278    Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d",
1279                              trap_reason_name(reason), trap_action_name(action), fr.pc(),
1280                              trap_method->name_and_sig_as_C_string(), trap_bci);
1281
1282    // Print a bunch of diagnostics, if requested.
1283    if (TraceDeoptimization || LogCompilation) {
1284      ResourceMark rm;
1285      ttyLocker ttyl;
1286      char buf[100];
1287      if (xtty != NULL) {
1288        xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
1289                         os::current_thread_id(),
1290                         format_trap_request(buf, sizeof(buf), trap_request));
1291        nm->log_identity(xtty);
1292      }
1293      Symbol* class_name = NULL;
1294      bool unresolved = false;
1295      if (unloaded_class_index >= 0) {
1296        constantPoolHandle constants (THREAD, trap_method->constants());
1297        if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1298          class_name = constants->klass_name_at(unloaded_class_index);
1299          unresolved = true;
1300          if (xtty != NULL)
1301            xtty->print(" unresolved='1'");
1302        } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1303          class_name = constants->symbol_at(unloaded_class_index);
1304        }
1305        if (xtty != NULL)
1306          xtty->name(class_name);
1307      }
1308      if (xtty != NULL && trap_mdo != NULL) {
1309        // Dump the relevant MDO state.
1310        // This is the deopt count for the current reason, any previous
1311        // reasons or recompiles seen at this point.
1312        int dcnt = trap_mdo->trap_count(reason);
1313        if (dcnt != 0)
1314          xtty->print(" count='%d'", dcnt);
1315        ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
1316        int dos = (pdata == NULL)? 0: pdata->trap_state();
1317        if (dos != 0) {
1318          xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
1319          if (trap_state_is_recompiled(dos)) {
1320            int recnt2 = trap_mdo->overflow_recompile_count();
1321            if (recnt2 != 0)
1322              xtty->print(" recompiles2='%d'", recnt2);
1323          }
1324        }
1325      }
1326      if (xtty != NULL) {
1327        xtty->stamp();
1328        xtty->end_head();
1329      }
1330      if (TraceDeoptimization) {  // make noise on the tty
1331        tty->print("Uncommon trap occurred in");
1332        nm->method()->print_short_name(tty);
1333        tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d",
1334                   fr.pc(),
1335                   os::current_thread_id(),
1336                   trap_reason_name(reason),
1337                   trap_action_name(action),
1338                   unloaded_class_index);
1339        if (class_name != NULL) {
1340          tty->print(unresolved ? " unresolved class: " : " symbol: ");
1341          class_name->print_symbol_on(tty);
1342        }
1343        tty->cr();
1344      }
1345      if (xtty != NULL) {
1346        // Log the precise location of the trap.
1347        for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
1348          xtty->begin_elem("jvms bci='%d'", sd->bci());
1349          xtty->method(sd->method());
1350          xtty->end_elem();
1351          if (sd->is_top())  break;
1352        }
1353        xtty->tail("uncommon_trap");
1354      }
1355    }
1356    // (End diagnostic printout.)
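    // For reference, the TraceDeoptimization output above comes out as one
    // tty line, roughly like this (illustrative values, not a real run):
    //   Uncommon trap occurred in java.lang.String::indexOf (@0x00007f88e51d04c4)
    //     thread=1234 reason=range_check action=make_not_entrant unloaded_class_index=-1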
1357
1358    // Load class if necessary
1359    if (unloaded_class_index >= 0) {
1360      constantPoolHandle constants(THREAD, trap_method->constants());
1361      load_class_by_index(constants, unloaded_class_index);
1362    }
1363
1364    // Flush the nmethod if necessary and desirable.
1365    //
1366    // We need to avoid situations where we are re-flushing the nmethod
1367    // because of a hot deoptimization site.  Repeated flushes at the same
1368    // point need to be detected by the compiler and avoided.  If the compiler
1369    // cannot avoid them (or has a bug and "refuses" to avoid them), this
1370    // module must take measures to avoid an infinite cycle of recompilation
1371    // and deoptimization.  There are several such measures:
1372    //
1373    //   1. If a recompilation is ordered a second time at some site X
1374    //   and for the same reason R, the action is adjusted to 'reinterpret',
1375    //   to give the interpreter time to exercise the method more thoroughly.
1376    //   If this happens, the method's overflow_recompile_count is incremented.
1377    //
1378    //   2. If the compiler fails to reduce the deoptimization rate, then
1379    //   the method's overflow_recompile_count will begin to exceed the set
1380    //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
1381    //   is adjusted to 'make_not_compilable', and the method is abandoned
1382    //   to the interpreter.  This is a performance hit for hot methods,
1383    //   but is better than a disastrous infinite cycle of recompilations.
1384    //   (Actually, only the method containing the site X is abandoned.)
1385    //
1386    //   3. In parallel with the previous measures, if the total number of
1387    //   recompilations of a method exceeds the much larger set limit
1388    //   PerMethodRecompilationCutoff, the method is abandoned.
1389    //   This should only happen if the method is very large and has
1390    //   many "lukewarm" deoptimizations.  The code which enforces this
1391    //   limit is elsewhere (class nmethod, class Method).
1392    //
1393    // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
1394    // to recompile at each bytecode independently of the per-BCI cutoff.
1395    //
1396    // The decision to update code is up to the compiler, and is encoded
1397    // in the Action_xxx code.  If the compiler requests Action_none
1398    // no trap state is changed, no compiled code is changed, and the
1399    // computation suffers along in the interpreter.
1400    //
1401    // The other action codes specify various tactics for decompilation
1402    // and recompilation.  Action_maybe_recompile is the loosest, and
1403    // allows the compiled code to stay around until enough traps are seen,
1404    // and until the compiler gets around to recompiling the trapping method.
1405    //
1406    // The other actions cause immediate removal of the present code.
1407
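    // An illustrative walk-through of the escalation above (not an exact
    // trace): suppose an aastore in method m traps with Reason_array_check
    // and Action_make_not_entrant.  The first trap discards the nmethod and
    // requests a recompile.  If the recompiled code traps again at the same
    // BCI for the same reason, the compiler weakens the action to
    // 'reinterpret' and overflow_recompile_count is incremented (measure 1).
    // Once that count exceeds PerBytecodeRecompilationCutoff, m is made not
    // compilable and is left to the interpreter (measure 2).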
1408    bool update_trap_state = true;
1409    bool make_not_entrant = false;
1410    bool make_not_compilable = false;
1411    bool reprofile = false;
1412    switch (action) {
1413    case Action_none:
1414      // Keep the old code.
1415      update_trap_state = false;
1416      break;
1417    case Action_maybe_recompile:
1418      // No need to invalidate the present code, but we may
1419      // initiate another compilation.
1420      // Start the compiler without (necessarily) invalidating the nmethod.
1421      // The system will tolerate the old code, but new code should be
1422      // generated when possible.
1423      break;
1424    case Action_reinterpret:
1425      // Go back into the interpreter for a while, and then consider
1426      // recompiling from scratch.
1427      make_not_entrant = true;
1428      // Reset the invocation counter for the outermost method.
1429      // This will allow the interpreter to exercise the bytecodes
1430      // for a while before recompiling.
1431      // By contrast, Action_make_not_entrant is immediate.
1432      //
1433      // Note that the compiler will track null_check, null_assert,
1434      // range_check, and class_check events and log them as if they
1435      // had been traps taken from compiled code.  This will update
1436      // the MDO trap history so that the next compilation will
1437      // properly detect hot trap sites.
1438      reprofile = true;
1439      break;
1440    case Action_make_not_entrant:
1441      // Request immediate recompilation, and get rid of the old code.
1442      // Make the nmethod not entrant, so the next call triggers a
1443      // recompile.  Unloaded classes are loaded now, so the recompiled
1444      // code can use them; the same holds for uninitialized classes.
1445      // The interpreter will link the missing class, if any.
1446      make_not_entrant = true;
1447      break;
1448    case Action_make_not_compilable:
1449      // Give up on compiling this method at all.
1450      make_not_entrant = true;
1451      make_not_compilable = true;
1452      break;
1453    default:
1454      ShouldNotReachHere();
1455    }
1456
1457    // Setting +ProfileTraps fixes the following, on all platforms:
1458    // 4852688: ProfileInterpreter is off by default for ia64.  The result is
1459    // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
1460    // recompile relies on a MethodData* to record heroic opt failures.
1461
1462    // Whether the interpreter is producing MDO data or not, we also need
1463    // to use the MDO to detect hot deoptimization points and control
1464    // aggressive optimization.
1465    bool inc_recompile_count = false;
1466    ProfileData* pdata = NULL;
1467    if (ProfileTraps && update_trap_state && trap_mdo != NULL) {
1468      assert(trap_mdo == get_method_data(thread, trap_method, false), "sanity");
1469      uint this_trap_count = 0;
1470      bool maybe_prior_trap = false;
1471      bool maybe_prior_recompile = false;
1472      pdata = query_update_method_data(trap_mdo, trap_bci, reason,
1473                                   //outputs:
1474                                   this_trap_count,
1475                                   maybe_prior_trap,
1476                                   maybe_prior_recompile);
1477      // Because the interpreter also counts null, div0, range, and class
1478      // checks, these traps from compiled code are double-counted.
1479      // This is harmless; it just means that the PerXTrapLimit values
1480      // are in effect a little smaller than they look.
1481
1482      DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1483      if (per_bc_reason != Reason_none) {
1484        // Now take action based on the partially known per-BCI history.
1485        if (maybe_prior_trap
1486            && this_trap_count >= (uint)PerBytecodeTrapLimit) {
1487          // If there are too many traps at this BCI, force a recompile.
1488          // This will allow the compiler to see the limit overflow, and
1489          // take corrective action, if possible.  The compiler generally
1490          // does not use the exact PerBytecodeTrapLimit value, but instead
1491          // changes its tactics if it sees any traps at all.  This provides
1492          // a little hysteresis, delaying a recompile until a trap happens
1493          // several times.
1494          //
1495          // Actually, since there is only one bit of counter per BCI,
1496          // the possible per-BCI counts are {0,1,(per-method count)}.
1497          // This produces accurate results if in fact there is only
1498          // one hot trap site, but begins to get fuzzy if there are
1499          // many sites.  For example, if there are ten sites each
1500          // trapping two or more times, they each get the blame for
1501          // all of their traps.
1502          make_not_entrant = true;
1503        }
1504
1505        // Detect repeated recompilation at the same BCI, and enforce a limit.
1506        if (make_not_entrant && maybe_prior_recompile) {
1507          // More than one recompile at this point.
1508          inc_recompile_count = maybe_prior_trap;
1509        }
1510      } else {
1511        // For reasons which are not recorded per-bytecode, we simply
1512        // force recompiles unconditionally.
1513        // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
1514        make_not_entrant = true;
1515      }
1516
1517      // Go back to the compiler if there are too many traps in this method.
1518      if (this_trap_count >= (uint)PerMethodTrapLimit) {
1519        // If there are too many traps in this method, force a recompile.
1520        // This will allow the compiler to see the limit overflow, and
1521        // take corrective action, if possible.
1522        // (This condition is an unlikely backstop only, because the
1523        // PerBytecodeTrapLimit is more likely to take effect first,
1524        // if it is applicable.)
1525        make_not_entrant = true;
1526      }
1527
1528      // Here's more hysteresis:  If there has been a recompile at
1529      // this trap point already, run the method in the interpreter
1530      // for a while to exercise it more thoroughly.
1531      if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
1532        reprofile = true;
1533      }
1534
1535    }
1536
1537    // Take requested actions on the method:
1538
1539    // Recompile
1540    if (make_not_entrant) {
1541      if (!nm->make_not_entrant()) {
1542        return; // the call did not change nmethod's state
1543      }
1544
1545      if (pdata != NULL) {
1546        // Record the recompilation event, if any.
1547        int tstate0 = pdata->trap_state();
1548        int tstate1 = trap_state_set_recompiled(tstate0, true);
1549        if (tstate1 != tstate0)
1550          pdata->set_trap_state(tstate1);
1551      }
1552    }
1553
1554    if (inc_recompile_count) {
1555      trap_mdo->inc_overflow_recompile_count();
1556      if ((uint)trap_mdo->overflow_recompile_count() >
1557          (uint)PerBytecodeRecompilationCutoff) {
1558        // Give up on the method containing the bad BCI.
1559        if (trap_method() == nm->method()) {
1560          make_not_compilable = true;
1561        } else {
1562          trap_method->set_not_compilable(CompLevel_full_optimization);
1563          // But give grace to the enclosing nm->method().
1564        }
1565      }
1566    }
1567
1568    // Reprofile
1569    if (reprofile) {
1570      CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
1571    }
1572
1573    // Give up compiling
1574    if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
1575      assert(make_not_entrant, "consistent");
1576      nm->method()->set_not_compilable(CompLevel_full_optimization);
1577    }
1578
1579  } // Free marked resources
1580
1581}
1582JRT_END
1583
1584MethodData*
1585Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
1586                                bool create_if_missing) {
1587  Thread* THREAD = thread;
1588  MethodData* mdo = m()->method_data();
1589  if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1590    // Build an MDO.  Ignore errors like OutOfMemory;
1591    // that simply means we won't have an MDO to update.
1592    Method::build_interpreter_method_data(m, THREAD);
1593    if (HAS_PENDING_EXCEPTION) {
1594      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
1595      CLEAR_PENDING_EXCEPTION;
1596    }
1597    mdo = m()->method_data();
1598  }
1599  return mdo;
1600}
1601
1602ProfileData*
1603Deoptimization::query_update_method_data(MethodData* trap_mdo,
1604                                         int trap_bci,
1605                                         Deoptimization::DeoptReason reason,
1606                                         //outputs:
1607                                         uint& ret_this_trap_count,
1608                                         bool& ret_maybe_prior_trap,
1609                                         bool& ret_maybe_prior_recompile) {
1610  uint prior_trap_count = trap_mdo->trap_count(reason);
1611  uint this_trap_count  = trap_mdo->inc_trap_count(reason);
1612
1613  // If the runtime cannot find a place to store trap history,
1614  // it is estimated based on the general condition of the method.
1615  // If the method has ever been recompiled, or has ever incurred
1616  // a trap with the present reason, then this BCI is assumed
1617  // (pessimistically) to be the culprit.
1618  bool maybe_prior_trap      = (prior_trap_count != 0);
1619  bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
1620  ProfileData* pdata = NULL;
1621
1622
1623  // For reasons which are recorded per bytecode, we check per-BCI data.
1624  DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
1625  if (per_bc_reason != Reason_none) {
1626    // Find the profile data for this BCI.  If there isn't one,
1627    // try to allocate one from the MDO's set of spares.
1628    // This will let us detect a repeated trap at this point.
1629    pdata = trap_mdo->allocate_bci_to_data(trap_bci);
1630
1631    if (pdata != NULL) {
1632      // Query the trap state of this profile datum.
1633      int tstate0 = pdata->trap_state();
1634      if (!trap_state_has_reason(tstate0, per_bc_reason))
1635        maybe_prior_trap = false;
1636      if (!trap_state_is_recompiled(tstate0))
1637        maybe_prior_recompile = false;
1638
1639      // Update the trap state of this profile datum.
1640      int tstate1 = tstate0;
1641      // Record the reason.
1642      tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
1643      // Store the updated state on the MDO, for next time.
1644      if (tstate1 != tstate0)
1645        pdata->set_trap_state(tstate1);
1646    } else {
1647      if (LogCompilation && xtty != NULL) {
1648        ttyLocker ttyl;
1649        // Missing MDP?  Leave a small complaint in the log.
1650        xtty->elem("missing_mdp bci='%d'", trap_bci);
1651      }
1652    }
1653  }
1654
1655  // Return results:
1656  ret_this_trap_count = this_trap_count;
1657  ret_maybe_prior_trap = maybe_prior_trap;
1658  ret_maybe_prior_recompile = maybe_prior_recompile;
1659  return pdata;
1660}
1661
1662void
1663Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
1664  ResourceMark rm;
1665  // Ignored outputs:
1666  uint ignore_this_trap_count;
1667  bool ignore_maybe_prior_trap;
1668  bool ignore_maybe_prior_recompile;
1669  query_update_method_data(trap_mdo, trap_bci,
1670                           (DeoptReason)reason,
1671                           ignore_this_trap_count,
1672                           ignore_maybe_prior_trap,
1673                           ignore_maybe_prior_recompile);
1674}
1675
1676Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
1677
1678  // Still in Java; no safepoints have occurred yet.
1679  {
1680    // This call enters the VM and may safepoint.
1681    uncommon_trap_inner(thread, trap_request);
1682  }
1683  return fetch_unroll_info_helper(thread);
1684}
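
// A sketch of the overall protocol, summarizing the code above rather than
// adding behavior: compiled code branches to the uncommon-trap blob, which
// calls uncommon_trap().  uncommon_trap_inner() updates the trap history
// and may invalidate the nmethod; fetch_unroll_info_helper() then builds
// the UnrollBlock that the blob uses to replace the compiled frame with
// interpreter frames before execution resumes in the interpreter.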
1685
1686// Local derived constants.
1687// Further breakdown of DataLayout::trap_state, as promised by DataLayout.
1688const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
1689const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
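// For illustration: if DataLayout::trap_mask were 0xFF, DS_REASON_MASK
// would be 0x7F (low bits holding one DeoptReason, or Reason_many) and
// DS_RECOMPILE_BIT would be 0x80 (a single flag recording that a recompile
// has already been requested at this BCI).  The real width is whatever
// DataLayout::trap_bits provides.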
1690
1691//---------------------------trap_state_reason---------------------------------
1692Deoptimization::DeoptReason
1693Deoptimization::trap_state_reason(int trap_state) {
1694  // This assert provides the link between the width of DataLayout::trap_bits
1695  // and the encoding of "recorded" reasons.  It ensures there are enough
1696  // bits to store all needed reasons in the per-BCI MDO profile.
1697  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
1698  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
1699  trap_state -= recompile_bit;
1700  if (trap_state == DS_REASON_MASK) {
1701    return Reason_many;
1702  } else {
1703    assert((int)Reason_none == 0, "state=0 => Reason_none");
1704    return (DeoptReason)trap_state;
1705  }
1706}
1707//-------------------------trap_state_has_reason-------------------------------
1708int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
1709  assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
1710  assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
1711  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
1712  trap_state -= recompile_bit;
1713  if (trap_state == DS_REASON_MASK) {
1714    return -1;  // true, unspecifically (bottom of state lattice)
1715  } else if (trap_state == reason) {
1716    return 1;   // true, definitely
1717  } else if (trap_state == 0) {
1718    return 0;   // false, definitely (top of state lattice)
1719  } else {
1720    return 0;   // false, definitely
1721  }
1722}
1723//-------------------------trap_state_add_reason-------------------------------
1724int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
1725  assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
1726  int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
1727  trap_state -= recompile_bit;
1728  if (trap_state == DS_REASON_MASK) {
1729    return trap_state + recompile_bit;     // already at state lattice bottom
1730  } else if (trap_state == reason) {
1731    return trap_state + recompile_bit;     // the condition is already true
1732  } else if (trap_state == 0) {
1733    return reason + recompile_bit;          // no condition has yet been true
1734  } else {
1735    return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
1736  }
1737}
1738//-----------------------trap_state_is_recompiled------------------------------
1739bool Deoptimization::trap_state_is_recompiled(int trap_state) {
1740  return (trap_state & DS_RECOMPILE_BIT) != 0;
1741}
1742//-----------------------trap_state_set_recompiled-----------------------------
1743int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
1744  if (z)  return trap_state |  DS_RECOMPILE_BIT;
1745  else    return trap_state & ~DS_RECOMPILE_BIT;
1746}
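
// A minimal round-trip sketch of the trap_state encoding, using only the
// accessors defined above.  The guard macro is hypothetical and never
// defined, so this block is documentation rather than part of the build.
#ifdef DEOPT_TRAP_STATE_EXAMPLE
static void trap_state_example() {
  int ts = 0;  // empty state: no reason recorded, recompile bit clear
  ts = Deoptimization::trap_state_add_reason(ts, Deoptimization::Reason_null_check);
  ts = Deoptimization::trap_state_set_recompiled(ts, true);
  assert(Deoptimization::trap_state_has_reason(ts, Deoptimization::Reason_null_check) == 1, "definitely true");
  assert(Deoptimization::trap_state_is_recompiled(ts), "recompile bit set");
  // Adding a second, different reason saturates the reason field to Reason_many:
  ts = Deoptimization::trap_state_add_reason(ts, Deoptimization::Reason_range_check);
  assert(Deoptimization::trap_state_reason(ts) == Deoptimization::Reason_many, "lattice bottom");
}
#endif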
1747//---------------------------format_trap_state---------------------------------
1748// This is used for debugging and diagnostics, including hotspot.log output.
1749const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
1750                                              int trap_state) {
1751  DeoptReason reason      = trap_state_reason(trap_state);
1752  bool        recomp_flag = trap_state_is_recompiled(trap_state);
1753  // Re-encode the state from its decoded components.
1754  int decoded_state = 0;
1755  if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
1756    decoded_state = trap_state_add_reason(decoded_state, reason);
1757  if (recomp_flag)
1758    decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
1759  // If the state re-encodes properly, format it symbolically.
1760  // Because this routine is used for debugging and diagnostics,
1761  // be robust even if the state is a strange value.
1762  size_t len;
1763  if (decoded_state != trap_state) {
1764    // Random buggy state that doesn't decode??
1765    len = jio_snprintf(buf, buflen, "#%d", trap_state);
1766  } else {
1767    len = jio_snprintf(buf, buflen, "%s%s",
1768                       trap_reason_name(reason),
1769                       recomp_flag ? " recompiled" : "");
1770  }
1771  if (len >= buflen)
1772    buf[buflen-1] = '\0';
1773  return buf;
1774}
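// Example renderings (illustrative): a trap_state encoding Reason_class_check
// with the recompile bit set formats as "class_check recompiled", while a
// state that fails to re-encode falls back to the raw "#<state>" form.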
1775
1776
1777//--------------------------------statics--------------------------------------
1778Deoptimization::DeoptAction Deoptimization::_unloaded_action
1779  = Deoptimization::Action_reinterpret;
1780const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
1781  // Note:  Keep this in sync. with enum DeoptReason.
1782  "none",
1783  "null_check",
1784  "null_assert",
1785  "range_check",
1786  "class_check",
1787  "array_check",
1788  "intrinsic",
1789  "bimorphic",
1790  "unloaded",
1791  "uninitialized",
1792  "unreached",
1793  "unhandled",
1794  "constraint",
1795  "div0_check",
1796  "age",
1797  "predicate",
1798  "loop_limit_check"
1799};
1800const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
1801  // Note:  Keep this in sync. with enum DeoptAction.
1802  "none",
1803  "maybe_recompile",
1804  "reinterpret",
1805  "make_not_entrant",
1806  "make_not_compilable"
1807};
1808
1809const char* Deoptimization::trap_reason_name(int reason) {
1810  if (reason == Reason_many)  return "many";
1811  if ((uint)reason < Reason_LIMIT)
1812    return _trap_reason_name[reason];
1813  static char buf[20];
1814  sprintf(buf, "reason%d", reason);
1815  return buf;
1816}
1817const char* Deoptimization::trap_action_name(int action) {
1818  if ((uint)action < Action_LIMIT)
1819    return _trap_action_name[action];
1820  static char buf[20];
1821  sprintf(buf, "action%d", action);
1822  return buf;
1823}
1824
1825// This is used for debugging and diagnostics, including hotspot.log output.
1826const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
1827                                                int trap_request) {
1828  jint unloaded_class_index = trap_request_index(trap_request);
1829  const char* reason = trap_reason_name(trap_request_reason(trap_request));
1830  const char* action = trap_action_name(trap_request_action(trap_request));
1831  size_t len;
1832  if (unloaded_class_index < 0) {
1833    len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
1834                       reason, action);
1835  } else {
1836    len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
1837                       reason, action, unloaded_class_index);
1838  }
1839  if (len >= buflen)
1840    buf[buflen-1] = '\0';
1841  return buf;
1842}
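// For example (illustrative values), a trap_request formats as
//   reason='unloaded' action='reinterpret' index='57'
// when an unloaded class is involved, or simply
//   reason='null_check' action='make_not_entrant'
// when there is no constant pool index.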
1843
1844juint Deoptimization::_deoptimization_hist
1845        [Deoptimization::Reason_LIMIT]
1846    [1 + Deoptimization::Action_LIMIT]
1847        [Deoptimization::BC_CASE_LIMIT]
1848  = {0};
1849
1850enum {
1851  LSB_BITS = 8,
1852  LSB_MASK = right_n_bits(LSB_BITS)
1853};
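
// Each cases[] cell packs a bytecode into its low LSB_BITS and an event
// count into the remaining high bits; for example (illustrative), a cell
// holding (7 << LSB_BITS) | Bytecodes::_invokevirtual records seven traps
// at invokevirtual sites for that reason/action pair.  The last cell keeps
// its LSB clear and absorbs overflow and unknown bytecodes.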
1854
1855void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
1856                                       Bytecodes::Code bc) {
1857  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
1858  assert(action >= 0 && action < Action_LIMIT, "oob");
1859  _deoptimization_hist[Reason_none][0][0] += 1;  // total
1860  _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
1861  juint* cases = _deoptimization_hist[reason][1+action];
1862  juint* bc_counter_addr = NULL;
1863  juint  bc_counter      = 0;
1864  // Look for an unused counter, or an exact match to this BC.
1865  if (bc != Bytecodes::_illegal) {
1866    for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
1867      juint* counter_addr = &cases[bc_case];
1868      juint  counter = *counter_addr;
1869      if ((counter == 0 && bc_counter_addr == NULL)
1870          || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
1871        // this counter is either free or is already devoted to this BC
1872        bc_counter_addr = counter_addr;
1873        bc_counter = counter | bc;
1874      }
1875    }
1876  }
1877  if (bc_counter_addr == NULL) {
1878    // Overflow, or no given bytecode.
1879    bc_counter_addr = &cases[BC_CASE_LIMIT-1];
1880    bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
1881  }
1882  *bc_counter_addr = bc_counter + (1 << LSB_BITS);
1883}
1884
1885jint Deoptimization::total_deoptimization_count() {
1886  return _deoptimization_hist[Reason_none][0][0];
1887}
1888
1889jint Deoptimization::deoptimization_count(DeoptReason reason) {
1890  assert(reason >= 0 && reason < Reason_LIMIT, "oob");
1891  return _deoptimization_hist[reason][0][0];
1892}
1893
1894void Deoptimization::print_statistics() {
1895  juint total = total_deoptimization_count();
1896  juint account = total;
1897  if (total != 0) {
1898    ttyLocker ttyl;
1899    if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
1900    tty->print_cr("Deoptimization traps recorded:");
1901    #define PRINT_STAT_LINE(name, r) \
1902      tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
1903    PRINT_STAT_LINE("total", total);
1904    // For each non-zero entry in the histogram, print the reason,
1905    // the action, and (if specifically known) the type of bytecode.
1906    for (int reason = 0; reason < Reason_LIMIT; reason++) {
1907      for (int action = 0; action < Action_LIMIT; action++) {
1908        juint* cases = _deoptimization_hist[reason][1+action];
1909        for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
1910          juint counter = cases[bc_case];
1911          if (counter != 0) {
1912            char name[1*K];
1913            Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
1914            if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)  // overflow bucket carries no bytecode
1915              bc = Bytecodes::_illegal;
1916            sprintf(name, "%s/%s/%s",
1917                    trap_reason_name(reason),
1918                    trap_action_name(action),
1919                    Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
1920            juint r = counter >> LSB_BITS;
1921            tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
1922            account -= r;
1923          }
1924        }
1925      }
1926    }
1927    if (account != 0) {
1928      PRINT_STAT_LINE("unaccounted", account);
1929    }
1930    #undef PRINT_STAT_LINE
1931    if (xtty != NULL)  xtty->tail("statistics");
1932  }
1933}
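
// When these statistics are printed (e.g. at VM exit), the output of the
// code above looks roughly like this (illustrative counts):
//   Deoptimization traps recorded:
//     42 (100.0%) total
//        null_check/make_not_entrant/ifnull:  17 (40.5%)
// with one line per non-zero (reason, action, bytecode) bucket.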
1934#else // COMPILER2 || SHARK
1935
1936
1937// Stubs for a C1-only system.
1938bool Deoptimization::trap_state_is_recompiled(int trap_state) {
1939  return false;
1940}
1941
1942const char* Deoptimization::trap_reason_name(int reason) {
1943  return "unknown";
1944}
1945
1946void Deoptimization::print_statistics() {
1947  // no output
1948}
1949
1950void
1951Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
1952  // no update
1953}
1954
1955int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
1956  return 0;
1957}
1958
1959void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
1960                                       Bytecodes::Code bc) {
1961  // no update
1962}
1963
1964const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
1965                                              int trap_state) {
1966  jio_snprintf(buf, buflen, "#%d", trap_state);
1967  return buf;
1968}
1969
1970#endif // COMPILER2 || SHARK
1971