nmethod.cpp revision 9099:115188e14c15
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif

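// Advanced by increase_unloading_clock(); zero is reserved as the
// "never set" value (see increase_unloading_clock() below).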
unsigned char nmethod::_global_unloading_clock = 0;

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif

bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_shark();
}



//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed to the log only.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }

  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
  }

  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
} nmethod_stats;
#endif //PRODUCT


//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc,handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}


address ExceptionCache::test_address(address addr) {
  for (int i=0; i<count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}

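// Returns false when this entry is full, in which case the caller
// (add_handler_for_exception_and_pc below) links in a fresh ExceptionCache.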
bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(),addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
  }
  return false;
}


// This method is private and is used to manipulate the exception cache
// directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}


//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
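// In the approximate case a PcDesc matches any pc_offset in the half-open
// interval ((pc-1)->pc_offset(), pc->pc_offset()].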
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}

void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
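  // The most recently added entry is kept at _pc_descs[0]; the oldest
  // entry falls off the end.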
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
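// Worked example (illustrative sizes only): with oopSize == 8 and
// sizeof(PcDesc) == 12, a pcs_size of 36 rounds up to 40, which is not a
// multiple of 12, so the result becomes 36 + 12 == 48, a multiple of
// both 12 and 8.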
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}

//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}

void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}


void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}


//-------------end of code for ExceptionCache--------------


int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = in_use;
  _unloading_clock            = 0;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _marked_for_deoptimization  = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false;           // jvmti state

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  if (UseG1GC) {
    _unloading_next        = NULL;
  } else {
    _scavenge_root_link    = NULL;
  }
  _scavenge_root_state     = 0;
  _compiler                = NULL;
#if INCLUDE_RTM_OPT
  _rtm_state               = NoRTM;
#endif
}

nmethod* nmethod::new_native_nmethod(methodHandle method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                            compile_id, &offsets,
                                            code_buffer, frame_size,
                                            basic_lock_owner_sp_offset,
                                            basic_lock_sp_offset, oop_maps);
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
    if ((PrintAssembly || CompilerOracle::should_print(method)) && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

nmethod* nmethod::new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);

    nm = new (nmethod_size, comp_level)
    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level);

    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies, which makes it linear in
      // the number of methods compiled.  For applications with a lot
      // of classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        if (deps.type() == Dependencies::call_site_target_value) {
          // CallSite dependencies are managed on per-CallSite instance basis.
          oop call_site = deps.argument_oop(0);
          MethodHandles::add_dependent_nmethod(call_site, nm);
        } else {
          Klass* klass = deps.context_type();
          if (klass == NULL) {
            continue;  // ignore things like evol_method
          }
          // record this nmethod as dependent on this klass
          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
        }
      }
      NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}


// For native wrappers
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      if (oop_maps != NULL) {
        oop_maps->print();
      }
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}

nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // We use the entry point information to find out whether a method is
    // static or non-static.
    assert(compiler->is_c2() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }

  bool printnmethods = PrintNMethods
    || CompilerOracle::should_print(_method)
    || CompilerOracle::has_option_string(_method, "PrintNMethods");
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}


// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}

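// Emit the section's start offset, but only when the section is non-empty.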
#define LOG_OFFSET(log, name)                    \
  if (p2i(name##_end()) - p2i(name##_begin())) \
    log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'"    , \
               p2i(name##_begin()) - p2i(this))


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
    xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
    } else {
      CompileTask::print(st, this, msg, /*short_form:*/ false);
    }
  }
}


void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    if (oop_maps()) {
      oop_maps()->print();
    }
  }
  if (PrintDebugInfo) {
    print_scopes();
  }
  if (PrintRelocations) {
    print_relocations();
  }
  if (PrintDependencies) {
    print_dependencies();
  }
  if (PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}


// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}

bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}


void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}


void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void nmethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}


void nmethod::cleanup_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // It is OK to look up references to zombies here.
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}

void nmethod::verify_clean_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // It is OK to look up references to zombies here.
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
}

int nmethod::verify_icholder_relocations() {
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_convert_to_zombie() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does partial sweep the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
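  // Example (illustrative numbers): an nmethod marked on stack during
  // traversal 10 becomes convertible only once traversal_count() reaches
  // 12 or more.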
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm();
}

void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}

void nmethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

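// The per-nmethod clock is written with release semantics and read with
// acquire semantics because it is accessed by multiple threads during
// unloading.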
void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char nmethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}

void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (TraceClassUnloading && WizardMode) {
    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
                  " unloadable], Method*(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  p2i(this), p2i(_method), p2i(cause));
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print();
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the Method*) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the Method*, but the Method* does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }
  // Make the nmethod unloaded, i.e., change state and notify the sweeper.
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }

  // Unregister must be done before the state change
  Universe::heap()->unregister_nmethod(this);

  _state = unloaded;

  // Log the unloading.
  log_state_change();

  // The Method* is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  NMethodSweeper::report_state_change(this);
}

void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
    method()->method_holder()->remove_osr_nmethod(this);
}

void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
      ttyLocker ttyl;  // keep the following output all in one block
      if (_state == unloaded) {
        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                         os::current_thread_id());
      } else {
        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                         os::current_thread_id(),
                         (_state == zombie ? " zombie='1'" : ""));
      }
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  }
}

/**
 * Common functionality for both make_not_entrant and make_zombie
 */
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");

  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  nmethodLocker nml(this);
  methodHandle the_method(method());
  No_Safepoint_Verifier nsv;

  // During patching, depending on the nmethod state, we must notify the GC that
  // code has been unloaded, unregistering it. We cannot do this while
  // holding the Patching_lock because we need to use the CodeCache_lock. This
  // would be prone to deadlocks.
  // This flag is used to remember whether we need to later lock and unregister.
  bool nmethod_needs_unregister = false;

  {
    // invalidate osr nmethod before acquiring the patching lock since
    // they both acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods.
    if (is_osr_method()) {
      // this effectively makes the osr nmethod not entrant
      invalidate_osr_method();
    }

    // Enter critical section.  Does not block for safepoint.
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

    if (_state == state) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
    }

    // The caller can be calling the method statically or through an inline
    // cache call.
    if (!is_osr_method() && !is_not_entrant()) {
      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                  SharedRuntime::get_handle_wrong_method_stub());
    }

    if (is_in_use()) {
      // It's a true state change, so mark the method as decompiled.
      // Do it only for transition from alive.
      inc_decompile_count();
    }

    // If the state is becoming a zombie, signal to unregister the nmethod with
    // the heap.
    // This nmethod may have already been unloaded during a full GC.
    if ((state == zombie) && !is_unloaded()) {
      nmethod_needs_unregister = true;
    }

    // Must happen before state change. Otherwise we have a race condition in
    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
    // transition its state from 'not_entrant' to 'zombie' without having to wait
    // for stack scanning.
    if (state == not_entrant) {
      mark_as_seen_on_stack();
      OrderAccess::storestore();
    }

    // Change state
    _state = state;

    // Log the transition once
    log_state_change();

    // Remove nmethod from method.
    // We need to check if both the _code and _from_compiled_code_entry_point
    // refer to this nmethod because there is a race in setting these two fields
    // in Method* as seen in bugid 4947125.
    // If the vep() points to the zombie nmethod, the memory for the nmethod
    // could be flushed and the compiler and vtable stubs could still call
    // through it.
    if (method() != NULL && (method()->code() == this ||
                             method()->from_compiled_entry() == verified_entry_point())) {
      HandleMark hm;
      method()->clear_code();
    }
  } // leave critical region under Patching_lock

  // When the nmethod becomes zombie it is no longer alive so the
  // dependencies must be flushed.  nmethods in the not_entrant
  // state will be flushed later when the transition to zombie
  // happens or they get unloaded.
  if (state == zombie) {
    {
      // Flushing dependencies must be done before any possible
      // safepoint can sneak in, otherwise the oops used by the
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (nmethod_needs_unregister) {
        Universe::heap()->unregister_nmethod(this);
      }
      flush_dependencies(NULL);
    }

    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
    // event and it hasn't already been reported for this nmethod then
    // report it now. (The event may have been reported earlier if the GC
    // marked it for unloading.) JvmtiDeferredEventQueue support means
    // we no longer go to a safepoint here.
    post_compiled_method_unload();

#ifdef ASSERT
    // It's no longer safe to access the oops section since zombie
    // nmethods aren't scanned for GC.
    _oops_are_stale = true;
#endif
    // the Method may be reclaimed by class unloading now that the
    // nmethod is in zombie state
    set_method(NULL);
  } else {
    assert(state == not_entrant, "other cases may need to be handled differently");
  }

  if (TraceCreateZombies) {
    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s",
                  p2i(this), (state == not_entrant) ? "not entrant" : "zombie");
  }

  NMethodSweeper::report_state_change(this);
  return true;
}

void nmethod::flush() {
  // Note that there are no valid oops in the nmethod anymore.
  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  assert_locked_or_safepoint(CodeCache_lock);

  // completely deallocate this method
  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
  if (PrintMethodFlushing) {
    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
                  "/Free CodeCache:" SIZE_FORMAT "Kb",
                  _compile_id, p2i(this), CodeCache::nof_blobs(),
                  CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
  }

  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  set_exception_cache(NULL);
  while (ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }

  if (on_scavenge_root_list()) {
    CodeCache::drop_scavenge_root_nmethod(this);
  }

#ifdef SHARK
  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK

  ((CodeBlob*)(this))->flush();

  CodeCache::free(this);
}
1451
1452//
1453// Notify all classes this nmethod is dependent on that it is no
1454// longer dependent. This should only be called in two situations.
1455// First, when a nmethod transitions to a zombie all dependents need
1456// to be clear.  Since zombification happens at a safepoint there's no
1457// synchronization issues.  The second place is a little more tricky.
1458// During phase 1 of mark sweep class unloading may happen and as a
1459// result some nmethods may get unloaded.  In this case the flushing
1460// of dependencies must happen during phase 1 since after GC any
1461// dependencies in the unloaded nmethod won't be updated, so
1462// traversing the dependency information in unsafe.  In that case this
1463// function is called with a non-NULL argument and this function only
1464// notifies instanceKlasses that are reachable
1465
1466void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1467  assert_locked_or_safepoint(CodeCache_lock);
1468  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1469  "is_alive is non-NULL if and only if we are called during GC");
1470  if (!has_flushed_dependencies()) {
1471    set_has_flushed_dependencies();
1472    for (Dependencies::DepStream deps(this); deps.next(); ) {
1473      if (deps.type() == Dependencies::call_site_target_value) {
1474        // CallSite dependencies are managed on a per-CallSite instance basis.
1475        oop call_site = deps.argument_oop(0);
1476        MethodHandles::remove_dependent_nmethod(call_site, this);
1477      } else {
1478        Klass* klass = deps.context_type();
1479        if (klass == NULL) {
1480          continue;  // ignore things like evol_method
1481        }
1482        // During GC the is_alive closure is non-NULL, and is used to
1483        // determine liveness of dependees that need to be updated.
1484        if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1485          InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1486        }
1487      }
1488    }
1489  }
1490}
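
// [Illustrative aside, not in the original source] The is_alive contract
// above gives flush_dependencies() two distinct call shapes, sketched here
// with hypothetical call sites:
//
//   // during GC class unloading (Universe::heap()->is_gc_active() is true):
//   nm->flush_dependencies(gc_is_alive_closure);  // skip dead dependees
//
//   // at a safepoint zombie transition (no GC running):
//   nm->flush_dependencies(NULL);                 // notify every dependee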
1491
1492
1493// If this oop is not live, the nmethod can be unloaded.
1494bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1495  assert(root != NULL, "just checking");
1496  oop obj = *root;
1497  if (obj == NULL || is_alive->do_object_b(obj)) {
1498      return false;
1499  }
1500
1501  // If ScavengeRootsInCode is true, an nmethod might be unloaded
1502  // simply because one of its constant oops has gone dead.
1503  // No actual classes need to be unloaded in order for this to occur.
1504  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1505  make_unloaded(is_alive, obj);
1506  return true;
1507}
1508
1509// ------------------------------------------------------------------
1510// post_compiled_method_load_event
1511// new method for install_code() path
1512// Transfer information from compilation to jvmti
1513void nmethod::post_compiled_method_load_event() {
1514
1515  Method* moop = method();
1516  HOTSPOT_COMPILED_METHOD_LOAD(
1517      (char *) moop->klass_name()->bytes(),
1518      moop->klass_name()->utf8_length(),
1519      (char *) moop->name()->bytes(),
1520      moop->name()->utf8_length(),
1521      (char *) moop->signature()->bytes(),
1522      moop->signature()->utf8_length(),
1523      insts_begin(), insts_size());
1524
1525  if (JvmtiExport::should_post_compiled_method_load() ||
1526      JvmtiExport::should_post_compiled_method_unload()) {
1527    get_and_cache_jmethod_id();
1528  }
1529
1530  if (JvmtiExport::should_post_compiled_method_load()) {
1531    // Let the Service thread (which is a real Java thread) post the event
1532    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1533    JvmtiDeferredEventQueue::enqueue(
1534      JvmtiDeferredEvent::compiled_method_load_event(this));
1535  }
1536}
1537
1538jmethodID nmethod::get_and_cache_jmethod_id() {
1539  if (_jmethod_id == NULL) {
1540    // Cache the jmethod_id since it can no longer be looked up once the
1541    // method itself has been marked for unloading.
1542    _jmethod_id = method()->jmethod_id();
1543  }
1544  return _jmethod_id;
1545}
1546
1547void nmethod::post_compiled_method_unload() {
1548  if (unload_reported()) {
1549    // During unloading we transition to unloaded and then to zombie
1550    // and the unloading is reported during the first transition.
1551    return;
1552  }
1553
1554  assert(_method != NULL && !is_unloaded(), "just checking");
1555  DTRACE_METHOD_UNLOAD_PROBE(method());
1556
1557  // If a JVMTI agent has enabled the CompiledMethodUnload event then
1558  // post the event. Sometime later this nmethod will be made a zombie
1559  // by the sweeper but the Method* will not be valid at that point.
1560  // If the _jmethod_id is null then no load event was ever requested
1561  // so don't bother posting the unload.  The main reason for this is
1562  // that the jmethodID is a weak reference to the Method* so if
1563  // it's being unloaded there's no way to look it up since the weak
1564  // ref will have been cleared.
1565  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1566    assert(!unload_reported(), "already unloaded");
1567    JvmtiDeferredEvent event =
1568      JvmtiDeferredEvent::compiled_method_unload_event(this,
1569          _jmethod_id, insts_begin());
1570    if (SafepointSynchronize::is_at_safepoint()) {
1571      // Don't want to take the queueing lock. Add it as pending and
1572      // it will get enqueued later.
1573      JvmtiDeferredEventQueue::add_pending_event(event);
1574    } else {
1575      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1576      JvmtiDeferredEventQueue::enqueue(event);
1577    }
1578  }
1579
1580  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1581  // any time. As the nmethod is being unloaded now we mark it as
1582  // having the unload event reported - this will ensure that we don't
1583  // attempt to report the event in the unlikely scenario where the
1584  // event is enabled at the time the nmethod is made a zombie.
1585  set_unload_reported();
1586}
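
// [Illustrative aside, not in the original source] Both the load and the
// unload events funnel through the same deferred-queue pattern: producers
// that must not block (or are at a safepoint) hand the event off, and the
// Service thread delivers it to agents later. A minimal sketch of that
// shape using C++11 primitives instead of HotSpot's Mutex machinery
// (hypothetical DeferredQueue, not HotSpot's JvmtiDeferredEventQueue):
//
//   #include <condition_variable>
//   #include <deque>
//   #include <mutex>
//
//   template <typename Event>
//   class DeferredQueue {
//     std::mutex              _mu;
//     std::condition_variable _cv;
//     std::deque<Event>       _q;
//    public:
//     void enqueue(Event e) {                    // producer side
//       { std::lock_guard<std::mutex> g(_mu); _q.push_back(std::move(e)); }
//       _cv.notify_one();
//     }
//     Event dequeue() {                          // service-thread side
//       std::unique_lock<std::mutex> g(_mu);
//       _cv.wait(g, [this] { return !_q.empty(); });
//       Event e = std::move(_q.front());
//       _q.pop_front();
//       return e;
//     }
//   };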
1587
1588static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
1589  if (ic->is_icholder_call()) {
1590    // compiledICHolder oops are the one exception: the IC is kept if both
1591    // its holder method's and holder klass's loaders are still alive (checked below).
1592    CompiledICHolder* cichk_oop = ic->cached_icholder();
1593
1594    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1595        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1596      return;
1597    }
1598  } else {
1599    Metadata* ic_oop = ic->cached_metadata();
1600    if (ic_oop != NULL) {
1601      if (ic_oop->is_klass()) {
1602        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1603          return;
1604        }
1605      } else if (ic_oop->is_method()) {
1606        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1607          return;
1608        }
1609      } else {
1610        ShouldNotReachHere();
1611      }
1612    }
1613  }
1614
1615  ic->set_to_clean();
1616}
1617
1618// This is called at the end of the strong tracing/marking phase of a
1619// GC to unload an nmethod if it contains otherwise unreachable
1620// oops.
1621
1622void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1623  // Make sure the oops are ready to receive visitors
1624  assert(!is_zombie() && !is_unloaded(),
1625         "should not call follow on zombie or unloaded nmethod");
1626
1627  // If the method is not entrant then a JMP is plastered over the
1628  // first few bytes.  If an oop in the old code was there, that oop
1629  // should not get GC'd.  Skip the first few bytes of oops on
1630  // not-entrant methods.
1631  address low_boundary = verified_entry_point();
1632  if (is_not_entrant()) {
1633    low_boundary += NativeJump::instruction_size;
1634    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1635    // (See comment above.)
1636  }
1637
1638  // The RedefineClasses() API can cause the class unloading invariant
1639  // to no longer be true. See jvmtiExport.hpp for details.
1640  // Also, leave a debugging breadcrumb in local flag.
1641  if (JvmtiExport::has_redefined_a_class()) {
1642    // This set of the unloading_occurred flag is done before the
1643    // call to post_compiled_method_unload() so that the unloading
1644    // of this nmethod is reported.
1645    unloading_occurred = true;
1646  }
1647
1648  // Exception cache
1649  clean_exception_cache(is_alive);
1650
1651  // If class unloading occurred we first iterate over all inline caches and
1652  // clear ICs where the cached oop is referring to an unloaded klass or method.
1653  // The remaining live cached oops will be traversed in the relocInfo::oop_type
1654  // iteration below.
1655  if (unloading_occurred) {
1656    RelocIterator iter(this, low_boundary);
1657    while (iter.next()) {
1658      if (iter.type() == relocInfo::virtual_call_type) {
1659        CompiledIC *ic = CompiledIC_at(&iter);
1660        clean_ic_if_metadata_is_dead(ic, is_alive);
1661      }
1662    }
1663  }
1664
1665  // Compiled code
1666  {
1667    RelocIterator iter(this, low_boundary);
1668    while (iter.next()) {
1669      if (iter.type() == relocInfo::oop_type) {
1670        oop_Relocation* r = iter.oop_reloc();
1671        // In this loop, we must only traverse those oops directly embedded in
1672        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1673        assert(1 == (r->oop_is_immediate()) +
1674                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1675               "oop must be found in exactly one place");
1676        if (r->oop_is_immediate() && r->oop_value() != NULL) {
1677          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1678            return;
1679          }
1680        }
1681      }
1682    }
1683  }
1684
1685
1686  // Scopes
1687  for (oop* p = oops_begin(); p < oops_end(); p++) {
1688    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1689    if (can_unload(is_alive, p, unloading_occurred)) {
1690      return;
1691    }
1692  }
1693
1694  // Ensure that all metadata is still alive
1695  verify_metadata_loaders(low_boundary, is_alive);
1696}
1697
1698template <class CompiledICorStaticCall>
1699static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
1700  // It is ok to look up references to zombies here
1701  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
1702  if (cb != NULL && cb->is_nmethod()) {
1703    nmethod* nm = (nmethod*)cb;
1704
1705    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
1706      // The nmethod has not been processed yet.
1707      return true;
1708    }
1709
1710    // Clean inline caches pointing to both zombie and not_entrant methods
1711    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1712      ic->set_to_clean();
1713      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
1714    }
1715  }
1716
1717  return false;
1718}
1719
1720static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1721  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1722}
1723
1724static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1725  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1726}
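
// [Illustrative aside, not in the original source] The two thin overloads
// above exist because CompiledIC and CompiledStaticCall name their call
// target differently; each wrapper extracts the destination address and
// forwards it to the shared template. The instantiations they produce look
// like:
//
//   clean_if_nmethod_is_unloaded<CompiledIC>(ic, ic->ic_destination(),
//                                            is_alive, from);
//   clean_if_nmethod_is_unloaded<CompiledStaticCall>(csc, csc->destination(),
//                                                    is_alive, from);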
1727
1728bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
1729  assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
1730
1731  oop_Relocation* r = iter_at_oop->oop_reloc();
1732  // Traverse those oops directly embedded in the code.
1733  // Other oops (oop_index>0) are seen as part of scopes_oops.
1734  assert(1 == (r->oop_is_immediate()) +
1735         (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1736         "oop must be found in exactly one place");
1737  if (r->oop_is_immediate() && r->oop_value() != NULL) {
1738    // Unload this nmethod if the oop is dead.
1739    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1740      return true;
1741    }
1742  }
1743
1744  return false;
1745}
1746
1747
1748bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1749  ResourceMark rm;
1750
1751  // Make sure the oops are ready to receive visitors
1752  assert(!is_zombie() && !is_unloaded(),
1753         "should not call follow on zombie or unloaded nmethod");
1754
1755  // If the method is not entrant then a JMP is plastered over the
1756  // first few bytes.  If an oop in the old code was there, that oop
1757  // should not get GC'd.  Skip the first few bytes of oops on
1758  // not-entrant methods.
1759  address low_boundary = verified_entry_point();
1760  if (is_not_entrant()) {
1761    low_boundary += NativeJump::instruction_size;
1762    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1763    // (See comment above.)
1764  }
1765
1766  // The RedefineClasses() API can cause the class unloading invariant
1767  // to no longer be true. See jvmtiExport.hpp for details.
1768  // Also, leave a debugging breadcrumb in local flag.
1769  if (JvmtiExport::has_redefined_a_class()) {
1770    // This set of the unloading_occurred flag is done before the
1771    // call to post_compiled_method_unload() so that the unloading
1772    // of this nmethod is reported.
1773    unloading_occurred = true;
1774  }
1775
1776  // Exception cache
1777  clean_exception_cache(is_alive);
1778
1779  bool is_unloaded = false;
1780  bool postponed = false;
1781
1782  RelocIterator iter(this, low_boundary);
1783  while (iter.next()) {
1784
1785    switch (iter.type()) {
1786
1787    case relocInfo::virtual_call_type:
1788      if (unloading_occurred) {
1789        // If class unloading occurred we first iterate over all inline caches and
1790        // clear ICs where the cached oop is referring to an unloaded klass or method.
1791        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
1792      }
1793
1794      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1795      break;
1796
1797    case relocInfo::opt_virtual_call_type:
1798      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1799      break;
1800
1801    case relocInfo::static_call_type:
1802      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1803      break;
1804
1805    case relocInfo::oop_type:
1806      if (!is_unloaded) {
1807        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
1808      }
1809      break;
1810
1811    case relocInfo::metadata_type:
1812      break; // nothing to do.
1813    }
1814  }
1815
1816  if (is_unloaded) {
1817    return postponed;
1818  }
1819
1820  // Scopes
1821  for (oop* p = oops_begin(); p < oops_end(); p++) {
1822    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1823    if (can_unload(is_alive, p, unloading_occurred)) {
1824      is_unloaded = true;
1825      break;
1826    }
1827  }
1828
1829  if (is_unloaded) {
1830    return postponed;
1831  }
1832
1833  // Ensure that all metadata is still alive
1834  verify_metadata_loaders(low_boundary, is_alive);
1835
1836  return postponed;
1837}
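
// [Illustrative aside, not in the original source] The boolean returned
// above feeds a two-pass driver: ICs that point into nmethods whose
// unloading_clock has not caught up yet cannot be cleaned safely, so they
// are postponed and re-cleaned once every worker has finished the first
// pass. A hypothetical worker loop might look like:
//
//   bool postponed = false;
//   for (nmethod* nm = first_claimed(); nm != NULL; nm = next_claimed()) {
//     postponed |= nm->do_unloading_parallel(is_alive, unloading_occurred);
//   }
//   // ... barrier: all workers finish, global unloading clock advances ...
//   if (postponed) {
//     for (nmethod* nm = first_claimed(); nm != NULL; nm = next_claimed()) {
//       nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
//     }
//   }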
1838
1839void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
1840  ResourceMark rm;
1841
1842  // Make sure the oops are ready to receive visitors
1843  assert(!is_zombie(),
1844         "should not call follow on zombie nmethod");
1845
1846  // If the method is not entrant then a JMP is plastered over the
1847  // first few bytes.  If an oop in the old code was there, that oop
1848  // should not get GC'd.  Skip the first few bytes of oops on
1849  // not-entrant methods.
1850  address low_boundary = verified_entry_point();
1851  if (is_not_entrant()) {
1852    low_boundary += NativeJump::instruction_size;
1853    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1854    // (See comment above.)
1855  }
1856
1857  RelocIterator iter(this, low_boundary);
1858  while (iter.next()) {
1859
1860    switch (iter.type()) {
1861
1862    case relocInfo::virtual_call_type:
1863      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1864      break;
1865
1866    case relocInfo::opt_virtual_call_type:
1867      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1868      break;
1869
1870    case relocInfo::static_call_type:
1871      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1872      break;
1873    }
1874  }
1875}
1876
1877#ifdef ASSERT
1878
1879class CheckClass : AllStatic {
1880  static BoolObjectClosure* _is_alive;
1881
1882  // Check class_loader is alive for this bit of metadata.
1883  static void check_class(Metadata* md) {
1884    Klass* klass = NULL;
1885    if (md->is_klass()) {
1886      klass = ((Klass*)md);
1887    } else if (md->is_method()) {
1888      klass = ((Method*)md)->method_holder();
1889    } else if (md->is_methodData()) {
1890      klass = ((MethodData*)md)->method()->method_holder();
1891    } else {
1892      md->print();
1893      ShouldNotReachHere();
1894    }
1895    assert(klass->is_loader_alive(_is_alive), "must be alive");
1896  }
1897 public:
1898  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
1899    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
1900    _is_alive = is_alive;
1901    nm->metadata_do(check_class);
1902  }
1903};
1904
1905// This is called during a safepoint so can use static data
1906BoolObjectClosure* CheckClass::_is_alive = NULL;
1907#endif // ASSERT
1908
1909
1910// Processing of oop references should have been sufficient to keep
1911// all strong references alive.  Any weak references should have been
1912// cleared as well.  Visit all the metadata and ensure that it's
1913// really alive.
1914void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
1915#ifdef ASSERT
1916  RelocIterator iter(this, low_boundary);
1917  while (iter.next()) {
1918    // static_stub_Relocations may have dangling references to
1919    // Method*s so trim them out here.  Otherwise it looks like
1920    // compiled code is maintaining a link to dead metadata.
1921    address static_call_addr = NULL;
1922    if (iter.type() == relocInfo::opt_virtual_call_type) {
1923      CompiledIC* cic = CompiledIC_at(&iter);
1924      if (!cic->is_call_to_interpreted()) {
1925        static_call_addr = iter.addr();
1926      }
1927    } else if (iter.type() == relocInfo::static_call_type) {
1928      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
1929      if (!csc->is_call_to_interpreted()) {
1930        static_call_addr = iter.addr();
1931      }
1932    }
1933    if (static_call_addr != NULL) {
1934      RelocIterator sciter(this, low_boundary);
1935      while (sciter.next()) {
1936        if (sciter.type() == relocInfo::static_stub_type &&
1937            sciter.static_stub_reloc()->static_call() == static_call_addr) {
1938          sciter.static_stub_reloc()->clear_inline_cache();
1939        }
1940      }
1941    }
1942  }
1943  // Check that the metadata embedded in the nmethod is alive
1944  CheckClass::do_check_class(is_alive, this);
1945#endif
1946}
1947
1948
1949// Iterate over metadata calling this function.   Used by RedefineClasses
1950void nmethod::metadata_do(void f(Metadata*)) {
1951  address low_boundary = verified_entry_point();
1952  if (is_not_entrant()) {
1953    low_boundary += NativeJump::instruction_size;
1954    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1955    // (See comment above.)
1956  }
1957  {
1958    // Visit all immediate references that are embedded in the instruction stream.
1959    RelocIterator iter(this, low_boundary);
1960    while (iter.next()) {
1961      if (iter.type() == relocInfo::metadata_type ) {
1962        metadata_Relocation* r = iter.metadata_reloc();
1963        // In this loop, we must only follow those metadatas directly embedded in
1964        // the code.  Other metadatas (oop_index>0) are seen as part of
1965        // the metadata section below.
1966        assert(1 == (r->metadata_is_immediate()) +
1967               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
1968               "metadata must be found in exactly one place");
1969        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
1970          Metadata* md = r->metadata_value();
1971          if (md != _method) f(md);
1972        }
1973      } else if (iter.type() == relocInfo::virtual_call_type) {
1974        // Check compiledIC holders associated with this nmethod
1975        CompiledIC *ic = CompiledIC_at(&iter);
1976        if (ic->is_icholder_call()) {
1977          CompiledICHolder* cichk = ic->cached_icholder();
1978          f(cichk->holder_method());
1979          f(cichk->holder_klass());
1980        } else {
1981          Metadata* ic_oop = ic->cached_metadata();
1982          if (ic_oop != NULL) {
1983            f(ic_oop);
1984          }
1985        }
1986      }
1987    }
1988  }
1989
1990  // Visit the metadata section
1991  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
1992    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
1993    Metadata* md = *p;
1994    f(md);
1995  }
1996
1997  // Visit metadata not embedded in the other places.
1998  if (_method != NULL) f(_method);
1999}
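
// [Illustrative aside, not in the original source] metadata_do() is a
// push-style visitor: the caller supplies a plain function and every
// Metadata* reachable from this nmethod is passed to it. A hypothetical
// visitor that tallies what an nmethod embeds:
//
//   static int g_klasses = 0, g_methods = 0;
//   static void tally_metadata(Metadata* md) {
//     if (md->is_klass())       g_klasses++;
//     else if (md->is_method()) g_methods++;
//   }
//   // usage:  nm->metadata_do(tally_metadata);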
2000
2001void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
2002  // make sure the oops are ready to receive visitors
2003  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2004  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2005
2006  // If the method is not entrant or zombie then a JMP is plastered over the
2007  // first few bytes.  If an oop in the old code was there, that oop
2008  // should not get GC'd.  Skip the first few bytes of oops on
2009  // not-entrant methods.
2010  address low_boundary = verified_entry_point();
2011  if (is_not_entrant()) {
2012    low_boundary += NativeJump::instruction_size;
2013    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2014    // (See comment above.)
2015  }
2016
2017  RelocIterator iter(this, low_boundary);
2018
2019  while (iter.next()) {
2020    if (iter.type() == relocInfo::oop_type ) {
2021      oop_Relocation* r = iter.oop_reloc();
2022      // In this loop, we must only follow those oops directly embedded in
2023      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
2024      assert(1 == (r->oop_is_immediate()) +
2025                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2026             "oop must be found in exactly one place");
2027      if (r->oop_is_immediate() && r->oop_value() != NULL) {
2028        f->do_oop(r->oop_addr());
2029      }
2030    }
2031  }
2032
2033  // Scopes
2034  // This includes oop constants not inlined in the code stream.
2035  for (oop* p = oops_begin(); p < oops_end(); p++) {
2036    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2037    f->do_oop(p);
2038  }
2039}
2040
2041#define NMETHOD_SENTINEL ((nmethod*)badAddress)
2042
2043nmethod* volatile nmethod::_oops_do_mark_nmethods;
2044
2045// An nmethod is "marked" if its _mark_link is set non-null.
2046// Even if it is the end of the linked list, it will have a non-null link value,
2047// as long as it is on the list.
2048// This code must be MP safe, because it is used from parallel GC passes.
2049bool nmethod::test_set_oops_do_mark() {
2050  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
2051  nmethod* observed_mark_link = _oops_do_mark_link;
2052  if (observed_mark_link == NULL) {
2053    // Claim this nmethod for this thread to mark.
2054    observed_mark_link = (nmethod*)
2055      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
2056    if (observed_mark_link == NULL) {
2057
2058      // Atomically append this nmethod (now claimed) to the head of the list:
2059      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
2060      for (;;) {
2061        nmethod* required_mark_nmethods = observed_mark_nmethods;
2062        _oops_do_mark_link = required_mark_nmethods;
2063        observed_mark_nmethods = (nmethod*)
2064          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
2065        if (observed_mark_nmethods == required_mark_nmethods)
2066          break;
2067      }
2068      // Mark was clear when we first saw this guy.
2069      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
2070      return false;
2071    }
2072  }
2073  // On fall through, another racing thread marked this nmethod before we did.
2074  return true;
2075}
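
// [Illustrative aside, not in the original source] The claim-then-push
// protocol above is a classic lock-free idiom: one CAS on the per-node
// link claims the node (NULL -> sentinel), then a CAS loop pushes it onto
// a global intrusive stack. A standalone sketch with hypothetical names,
// using C++11 atomics in place of Atomic::cmpxchg_ptr:
//
//   #include <atomic>
//
//   struct Node { std::atomic<Node*> link{nullptr}; };
//   static Node* const SENTINEL = reinterpret_cast<Node*>(-1);
//   static std::atomic<Node*> g_head{SENTINEL};   // primed by a prologue
//
//   // Returns true if another thread already claimed the node.
//   static bool test_set_mark(Node* n) {
//     Node* expected = nullptr;
//     if (!n->link.compare_exchange_strong(expected, SENTINEL)) {
//       return true;                    // lost the race
//     }
//     Node* head = g_head.load();
//     do {
//       n->link.store(head);            // provisional next pointer
//     } while (!g_head.compare_exchange_weak(head, n));
//     return false;                     // claimed and enqueued
//   }
//
// Because g_head is primed with the sentinel, even the last node on the
// list keeps a non-NULL link, exactly as the comment above requires.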
2076
2077void nmethod::oops_do_marking_prologue() {
2078  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
2079  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
2080  // We use cmpxchg_ptr instead of regular assignment here because the user
2081  // may fork a bunch of threads, and we need them all to see the same state.
2082  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
2083  guarantee(observed == NULL, "no races in this sequential code");
2084}
2085
2086void nmethod::oops_do_marking_epilogue() {
2087  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
2088  nmethod* cur = _oops_do_mark_nmethods;
2089  while (cur != NMETHOD_SENTINEL) {
2090    assert(cur != NULL, "not NULL-terminated");
2091    nmethod* next = cur->_oops_do_mark_link;
2092    cur->_oops_do_mark_link = NULL;
2093    cur->verify_oop_relocations();
2094    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
2095    cur = next;
2096  }
2097  void* required = _oops_do_mark_nmethods;
2098  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
2099  guarantee(observed == required, "no races in this sequential code");
2100  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
2101}
2102
2103class DetectScavengeRoot: public OopClosure {
2104  bool     _detected_scavenge_root;
2105public:
2106  DetectScavengeRoot() : _detected_scavenge_root(false)
2107  { NOT_PRODUCT(_print_nm = NULL); }
2108  bool detected_scavenge_root() { return _detected_scavenge_root; }
2109  virtual void do_oop(oop* p) {
2110    if ((*p) != NULL && (*p)->is_scavengable()) {
2111      NOT_PRODUCT(maybe_print(p));
2112      _detected_scavenge_root = true;
2113    }
2114  }
2115  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2116
2117#ifndef PRODUCT
2118  nmethod* _print_nm;
2119  void maybe_print(oop* p) {
2120    if (_print_nm == NULL)  return;
2121    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
2122    tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
2123                  p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
2124                  p2i(*p), p2i(p));
2125    (*p)->print();
2126  }
2127#endif //PRODUCT
2128};
2129
2130bool nmethod::detect_scavenge_root_oops() {
2131  DetectScavengeRoot detect_scavenge_root;
2132  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
2133  oops_do(&detect_scavenge_root);
2134  return detect_scavenge_root.detected_scavenge_root();
2135}
2136
2137// Method that knows how to preserve outgoing arguments at call. This method must be
2138// called with a frame corresponding to a Java invoke
2139void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
2140#ifndef SHARK
2141  if (!method()->is_native()) {
2142    SimpleScopeDesc ssd(this, fr.pc());
2143    Bytecode_invoke call(ssd.method(), ssd.bci());
2144    bool has_receiver = call.has_receiver();
2145    bool has_appendix = call.has_appendix();
2146    Symbol* signature = call.signature();
2147    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
2148  }
2149#endif // !SHARK
2150}
2151
2152inline bool includes(void* p, void* from, void* to) {
2153  return from <= p && p < to;
2154}
2155
2156
2157void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2158  assert(count >= 2, "must be sentinel values, at least");
2159
2160#ifdef ASSERT
2161  // must be sorted and unique; we do a binary search in find_pc_desc()
2162  int prev_offset = pcs[0].pc_offset();
2163  assert(prev_offset == PcDesc::lower_offset_limit,
2164         "must start with a sentinel");
2165  for (int i = 1; i < count; i++) {
2166    int this_offset = pcs[i].pc_offset();
2167    assert(this_offset > prev_offset, "offsets must be sorted");
2168    prev_offset = this_offset;
2169  }
2170  assert(prev_offset == PcDesc::upper_offset_limit,
2171         "must end with a sentinel");
2172#endif //ASSERT
2173
2174  // Search for MethodHandle invokes and tag the nmethod.
2175  for (int i = 0; i < count; i++) {
2176    if (pcs[i].is_method_handle_invoke()) {
2177      set_has_method_handle_invokes(true);
2178      break;
2179    }
2180  }
2181  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
2182
2183  int size = count * sizeof(PcDesc);
2184  assert(scopes_pcs_size() >= size, "oob");
2185  memcpy(scopes_pcs_begin(), pcs, size);
2186
2187  // Adjust the final sentinel downward.
2188  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2189  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2190  last_pc->set_pc_offset(content_size() + 1);
2191  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2192    // Fill any rounding gaps with copies of the last record.
2193    last_pc[1] = last_pc[0];
2194  }
2195  // The following assert could fail if sizeof(PcDesc) is not
2196  // an integral multiple of oopSize (the rounding term).
2197  // If it fails, change the logic to always allocate a multiple
2198  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2199  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2200}
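
// [Illustrative aside, not in the original source] The tail-padding loop
// above relies on PcDesc being trivially copyable: any rounding slack after
// the real records is filled with copies of the final sentinel so the
// binary search in find_pc_desc() never inspects uninitialized memory. The
// shape of the idea, for a hypothetical array of n records in cap slots:
//
//   for (int i = n; i < cap; i++) {
//     a[i] = a[n - 1];   // duplicate the trailing sentinel record
//   }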
2201
2202void nmethod::copy_scopes_data(u_char* buffer, int size) {
2203  assert(scopes_data_size() >= size, "oob");
2204  memcpy(scopes_data_begin(), buffer, size);
2205}
2206
2207
2208#ifdef ASSERT
2209static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
2210  PcDesc* lower = nm->scopes_pcs_begin();
2211  PcDesc* upper = nm->scopes_pcs_end();
2212  lower += 1; // exclude initial sentinel
2213  PcDesc* res = NULL;
2214  for (PcDesc* p = lower; p < upper; p++) {
2215    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2216    if (match_desc(p, pc_offset, approximate)) {
2217      if (res == NULL)
2218        res = p;
2219      else
2220        res = (PcDesc*) badAddress;
2221    }
2222  }
2223  return res;
2224}
2225#endif
2226
2227
2228// Finds a PcDesc with real-pc equal to "pc"
2229PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
2230  address base_address = code_begin();
2231  if ((pc < base_address) ||
2232      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2233    return NULL;  // PC is wildly out of range
2234  }
2235  int pc_offset = (int) (pc - base_address);
2236
2237  // Check whether the PcDesc cache contains the desired PcDesc
2238  // (This has an almost 100% hit rate.)
2239  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2240  if (res != NULL) {
2241    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
2242    return res;
2243  }
2244
2245  // Fallback algorithm: quasi-linear search for the PcDesc
2246  // Find the last pc_offset less than the given offset.
2247  // The successor must be the required match, if there is a match at all.
2248  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2249  PcDesc* lower = scopes_pcs_begin();
2250  PcDesc* upper = scopes_pcs_end();
2251  upper -= 1; // exclude final sentinel
2252  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2253
2254#define assert_LU_OK \
2255  /* invariant on lower..upper during the following search: */ \
2256  assert(lower->pc_offset() <  pc_offset, "sanity"); \
2257  assert(upper->pc_offset() >= pc_offset, "sanity")
2258  assert_LU_OK;
2259
2260  // Use the last successful return as a split point.
2261  PcDesc* mid = _pc_desc_cache.last_pc_desc();
2262  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2263  if (mid->pc_offset() < pc_offset) {
2264    lower = mid;
2265  } else {
2266    upper = mid;
2267  }
2268
2269  // Take giant steps at first (4096, then 256, then 16, then 1)
2270  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2271  const int RADIX = (1 << LOG2_RADIX);
2272  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2273    while ((mid = lower + step) < upper) {
2274      assert_LU_OK;
2275      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2276      if (mid->pc_offset() < pc_offset) {
2277        lower = mid;
2278      } else {
2279        upper = mid;
2280        break;
2281      }
2282    }
2283    assert_LU_OK;
2284  }
2285
2286  // Sneak up on the value with a linear search of length ~16.
2287  while (true) {
2288    assert_LU_OK;
2289    mid = lower + 1;
2290    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2291    if (mid->pc_offset() < pc_offset) {
2292      lower = mid;
2293    } else {
2294      upper = mid;
2295      break;
2296    }
2297  }
2298#undef assert_LU_OK
2299
2300  if (match_desc(upper, pc_offset, approximate)) {
2301    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
2302    _pc_desc_cache.add_pc_desc(upper);
2303    return upper;
2304  } else {
2305    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2306    return NULL;
2307  }
2308}
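
// [Illustrative aside, not in the original source] The fallback above is a
// galloping search: starting from the cached split point it narrows
// [lower, upper] with strides of 4096, 256 and 16 before a short linear
// scan, always keeping lower->pc_offset() < pc_offset <= upper->pc_offset().
// The same scheme over a plain int array (hypothetical helper; requires
// *lo < key && *hi >= key on entry, returns the first element >= key):
//
//   static int* gallop_find(int* lo, int* hi, int key) {
//     for (int step = 16 * 16 * 16; step > 1; step >>= 4) {
//       int* mid;
//       while ((mid = lo + step) < hi) {
//         if (*mid < key) lo = mid;        // keep striding right
//         else            { hi = mid; break; }
//       }
//     }
//     while (lo + 1 < hi) {                // final linear sneak-up
//       int* mid = lo + 1;
//       if (*mid < key) lo = mid; else hi = mid;
//     }
//     return hi;
//   }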
2309
2310
2311void nmethod::check_all_dependencies(DepChange& changes) {
2312  // Checked dependencies are allocated into this ResourceMark
2313  ResourceMark rm;
2314
2315  // Turn off dependency tracing while actually testing dependencies.
2316  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2317
2318  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2319                            &DependencySignature::equals, 11027> DepTable;
2320
2321  DepTable* table = new DepTable();
2322
2323  // Iterate over live nmethods and check dependencies of all nmethods that are not
2324  // marked for deoptimization. A particular dependency is only checked once.
2325  NMethodIterator iter;
2326  while (iter.next()) {
2327    nmethod* nm = iter.method();
2328    // Only notify for live nmethods
2329    if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2330      for (Dependencies::DepStream deps(nm); deps.next(); ) {
2331        // Construct abstraction of a dependency.
2332        DependencySignature* current_sig = new DependencySignature(deps);
2333
2334        // Determine if dependency is already checked. table->put(...) returns
2335        // 'true' if the dependency is added (i.e., was not in the hashtable).
2336        if (table->put(*current_sig, 1)) {
2337          if (deps.check_dependency() != NULL) {
2338            // Dependency checking failed. Print out information about the failed
2339            // dependency and finally fail with an assert. We can fail here, since
2340            // dependency checking is never done in a product build.
2341            tty->print_cr("Failed dependency:");
2342            changes.print();
2343            nm->print();
2344            nm->print_dependencies();
2345            assert(false, "Should have been marked for deoptimization");
2346          }
2347        }
2348      }
2349    }
2350  }
2351}
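
// [Illustrative aside, not in the original source] The dedup above hinges
// on ResourceHashtable::put() returning true only for a newly inserted
// key, so each distinct dependency signature is checked once across all
// nmethods. The same idiom with the C++ standard library (hypothetical
// Signature key type and iteration):
//
//   #include <set>
//   std::set<Signature> seen;
//   for (Dep d : all_dependencies) {
//     if (seen.insert(d.signature()).second) {   // true => first sighting
//       check_once(d);
//     }
//   }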
2352
2353bool nmethod::check_dependency_on(DepChange& changes) {
2354  // What has happened:
2355  // 1) a new class dependee has been added
2356  // 2) dependee and all its super classes have been marked
2357  bool found_check = false;  // set true if we are upset
2358  for (Dependencies::DepStream deps(this); deps.next(); ) {
2359    // Evaluate only relevant dependencies.
2360    if (deps.spot_check_dependency_at(changes) != NULL) {
2361      found_check = true;
2362      NOT_DEBUG(break);
2363    }
2364  }
2365  return found_check;
2366}
2367
2368bool nmethod::is_evol_dependent_on(Klass* dependee) {
2369  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2370  Array<Method*>* dependee_methods = dependee_ik->methods();
2371  for (Dependencies::DepStream deps(this); deps.next(); ) {
2372    if (deps.type() == Dependencies::evol_method) {
2373      Method* method = deps.method_argument(0);
2374      for (int j = 0; j < dependee_methods->length(); j++) {
2375        if (dependee_methods->at(j) == method) {
2376          // RC_TRACE macro has an embedded ResourceMark
2377          RC_TRACE(0x01000000,
2378            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2379            _method->method_holder()->external_name(),
2380            _method->name()->as_C_string(),
2381            _method->signature()->as_C_string(), compile_id(),
2382            method->method_holder()->external_name(),
2383            method->name()->as_C_string(),
2384            method->signature()->as_C_string()));
2385          if (TraceDependencies || LogCompilation)
2386            deps.log_dependency(dependee);
2387          return true;
2388        }
2389      }
2390    }
2391  }
2392  return false;
2393}
2394
2395// Called from mark_for_deoptimization, when dependee is invalidated.
2396bool nmethod::is_dependent_on_method(Method* dependee) {
2397  for (Dependencies::DepStream deps(this); deps.next(); ) {
2398    if (deps.type() != Dependencies::evol_method)
2399      continue;
2400    Method* method = deps.method_argument(0);
2401    if (method == dependee) return true;
2402  }
2403  return false;
2404}
2405
2406
2407bool nmethod::is_patchable_at(address instr_addr) {
2408  assert(insts_contains(instr_addr), "wrong nmethod used");
2409  if (is_zombie()) {
2410    // a zombie may never be patched
2411    return false;
2412  }
2413  return true;
2414}
2415
2416
2417address nmethod::continuation_for_implicit_exception(address pc) {
2418  // Exception happened outside inline-cache check code => we are inside
2419  // an active nmethod => use the implicit exception table to determine a return address
2420  int exception_offset = pc - code_begin();
2421  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
2422#ifdef ASSERT
2423  if (cont_offset == 0) {
2424    Thread* thread = ThreadLocalStorage::get_thread_slow();
2425    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
2426    HandleMark hm(thread);
2427    ResourceMark rm(thread);
2428    CodeBlob* cb = CodeCache::find_blob(pc);
2429    assert(cb != NULL && cb == this, "");
2430    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
2431    print();
2432    method()->print_codes();
2433    print_code();
2434    print_pcs();
2435  }
2436#endif
2437  if (cont_offset == 0) {
2438    // Let the normal error handling report the exception
2439    return NULL;
2440  }
2441  return code_begin() + cont_offset;
2442}
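
// [Illustrative aside, not in the original source] Continuation lookup is a
// pure table probe: (faulting-offset -> continuation-offset) pairs were
// recorded at code-emission time, and the fault handler maps the faulting
// pc back through the owning nmethod. Hypothetical use in a signal handler:
//
//   CodeBlob* cb = CodeCache::find_blob(faulting_pc);
//   nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
//   if (nm != NULL) {
//     address cont = nm->continuation_for_implicit_exception(faulting_pc);
//     if (cont != NULL) {
//       // resume at cont, which raises the corresponding Java exception
//     }
//   }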
2443
2444
2445
2446void nmethod_init() {
2447  // make sure you didn't forget to adjust the filler fields
2448  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2449}
2450
2451
2452//-------------------------------------------------------------------------------------------
2453
2454
2455// QQQ might we make this work from a frame??
2456nmethodLocker::nmethodLocker(address pc) {
2457  CodeBlob* cb = CodeCache::find_blob(pc);
2458  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
2459  _nm = (nmethod*)cb;
2460  lock_nmethod(_nm);
2461}
2462
2463// Only JvmtiDeferredEvent::compiled_method_unload_event()
2464// should pass zombie_ok == true.
2465void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2466  if (nm == NULL)  return;
2467  Atomic::inc(&nm->_lock_count);
2468  assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2469}
2470
2471void nmethodLocker::unlock_nmethod(nmethod* nm) {
2472  if (nm == NULL)  return;
2473  Atomic::dec(&nm->_lock_count);
2474  assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2475}
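
// [Illustrative aside, not in the original source] nmethodLocker is meant
// to be used RAII-style: constructing one bumps _lock_count so the sweeper
// cannot reclaim the nmethod while it is being inspected; the destructor
// (declared elsewhere) releases it via unlock_nmethod(). Hypothetical use:
//
//   {
//     nmethodLocker nml(pc_inside_compiled_code);  // pins the nmethod
//     // ... walk debug info / scopes of the pinned nmethod safely ...
//   }                                              // unpinned here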
2476
2477
2478// -----------------------------------------------------------------------------
2479// nmethod::get_deopt_original_pc
2480//
2481// Return the original PC for the given PC if:
2482// (a) the given PC belongs to a nmethod and
2483// (b) it is a deopt PC
2484address nmethod::get_deopt_original_pc(const frame* fr) {
2485  if (fr->cb() == NULL)  return NULL;
2486
2487  nmethod* nm = fr->cb()->as_nmethod_or_null();
2488  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
2489    return nm->get_original_pc(fr);
2490
2491  return NULL;
2492}
2493
2494
2495// -----------------------------------------------------------------------------
2496// MethodHandle
2497
2498bool nmethod::is_method_handle_return(address return_pc) {
2499  if (!has_method_handle_invokes())  return false;
2500  PcDesc* pd = pc_desc_at(return_pc);
2501  if (pd == NULL)
2502    return false;
2503  return pd->is_method_handle_invoke();
2504}
2505
2506
2507// -----------------------------------------------------------------------------
2508// Verification
2509
2510class VerifyOopsClosure: public OopClosure {
2511  nmethod* _nm;
2512  bool     _ok;
2513public:
2514  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2515  bool ok() { return _ok; }
2516  virtual void do_oop(oop* p) {
2517    if ((*p) == NULL || (*p)->is_oop())  return;
2518    if (_ok) {
2519      _nm->print_nmethod(true);
2520      _ok = false;
2521    }
2522    tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2523                  p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2524  }
2525  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2526};
2527
2528void nmethod::verify() {
2529
2530  // Hmm: OSR methods can be deopted but not marked as zombie or
2531  // not_entrant, which seems odd.
2532
2533  if (is_zombie() || is_not_entrant() || is_unloaded())
2534    return;
2535
2536  // Make sure all the entry points are correctly aligned for patching.
2537  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2538
2539  // assert(method()->is_oop(), "must be valid");
2540
2541  ResourceMark rm;
2542
2543  if (!CodeCache::contains(this)) {
2544    fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2545  }
2546
2547  if (is_native_method())
2548    return;
2549
2550  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2551  if (nm != this) {
2552    fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2553  }
2554
2555  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2556    if (!p->verify(this)) {
2557      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2558    }
2559  }
2560
2561  VerifyOopsClosure voc(this);
2562  oops_do(&voc);
2563  assert(voc.ok(), "embedded oops must be OK");
2564  verify_scavenge_root_oops();
2565
2566  verify_scopes();
2567}
2568
2569
2570void nmethod::verify_interrupt_point(address call_site) {
2571  // Verify IC only when nmethod installation is finished.
2572  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
2573                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
2574  if (is_installed) {
2575    Thread *cur = Thread::current();
2576    if (CompiledIC_lock->owner() == cur ||
2577        ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2578         SafepointSynchronize::is_at_safepoint())) {
2579      CompiledIC_at(this, call_site);
2580      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2581    } else {
2582      MutexLocker ml_verify (CompiledIC_lock);
2583      CompiledIC_at(this, call_site);
2584    }
2585  }
2586
2587  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2588  assert(pd != NULL, "PcDesc must exist");
2589  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2590                                     pd->obj_decode_offset(), pd->should_reexecute(),
2591                                     pd->return_oop());
2592       !sd->is_top(); sd = sd->sender()) {
2593    sd->verify();
2594  }
2595}
2596
2597void nmethod::verify_scopes() {
2598  if (!method()) return;       // Runtime stubs have no scope
2599  if (method()->is_native()) return; // Ignore stub methods.
2600  // iterate through all interrupt point
2601  // and verify the debug information is valid.
2602  RelocIterator iter((nmethod*)this);
2603  while (iter.next()) {
2604    address stub = NULL;
2605    switch (iter.type()) {
2606      case relocInfo::virtual_call_type:
2607        verify_interrupt_point(iter.addr());
2608        break;
2609      case relocInfo::opt_virtual_call_type:
2610        stub = iter.opt_virtual_call_reloc()->static_stub();
2611        verify_interrupt_point(iter.addr());
2612        break;
2613      case relocInfo::static_call_type:
2614        stub = iter.static_call_reloc()->static_stub();
2615        //verify_interrupt_point(iter.addr());
2616        break;
2617      case relocInfo::runtime_call_type:
2618        address destination = iter.reloc()->value();
2619        // Right now there is no way to find out which entries support
2620        // an interrupt point.  It would be nice if we had this
2621        // information in a table.
2622        break;
2623    }
2624    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2625  }
2626}
2627
2628
2629// -----------------------------------------------------------------------------
2630// Non-product code
2631#ifndef PRODUCT
2632
2633class DebugScavengeRoot: public OopClosure {
2634  nmethod* _nm;
2635  bool     _ok;
2636public:
2637  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
2638  bool ok() { return _ok; }
2639  virtual void do_oop(oop* p) {
2640    if ((*p) == NULL || !(*p)->is_scavengable())  return;
2641    if (_ok) {
2642      _nm->print_nmethod(true);
2643      _ok = false;
2644    }
2645    tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2646                  p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2647    (*p)->print();
2648  }
2649  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2650};
2651
2652void nmethod::verify_scavenge_root_oops() {
2653  if (UseG1GC) {
2654    return;
2655  }
2656
2657  if (!on_scavenge_root_list()) {
2658    // Actually look inside, to verify the claim that it's clean.
2659    DebugScavengeRoot debug_scavenge_root(this);
2660    oops_do(&debug_scavenge_root);
2661    if (!debug_scavenge_root.ok())
2662      fatal("found an unadvertised bad scavengable oop in the code cache");
2663  }
2664  assert(scavenge_root_not_marked(), "");
2665}
2666
2667#endif // PRODUCT
2668
2669// Printing operations
2670
2671void nmethod::print() const {
2672  ResourceMark rm;
2673  ttyLocker ttyl;   // keep the following output all in one block
2674
2675  tty->print("Compiled method ");
2676
2677  if (is_compiled_by_c1()) {
2678    tty->print("(c1) ");
2679  } else if (is_compiled_by_c2()) {
2680    tty->print("(c2) ");
2681  } else if (is_compiled_by_shark()) {
2682    tty->print("(shark) ");
2683  } else {
2684    tty->print("(nm) ");
2685  }
2686
2687  print_on(tty, NULL);
2688
2689  if (WizardMode) {
2690    tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
2691    tty->print(" for method " INTPTR_FORMAT , p2i(method()));
2692    tty->print(" { ");
2693    if (is_in_use())      tty->print("in_use ");
2694    if (is_not_entrant()) tty->print("not_entrant ");
2695    if (is_zombie())      tty->print("zombie ");
2696    if (is_unloaded())    tty->print("unloaded ");
2697    if (on_scavenge_root_list())  tty->print("scavenge_root ");
2698    tty->print_cr("}:");
2699  }
2700  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2701                                              p2i(this),
2702                                              p2i(this) + size(),
2703                                              size());
2704  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2705                                              p2i(relocation_begin()),
2706                                              p2i(relocation_end()),
2707                                              relocation_size());
2708  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2709                                              p2i(consts_begin()),
2710                                              p2i(consts_end()),
2711                                              consts_size());
2712  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2713                                              p2i(insts_begin()),
2714                                              p2i(insts_end()),
2715                                              insts_size());
2716  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2717                                              p2i(stub_begin()),
2718                                              p2i(stub_end()),
2719                                              stub_size());
2720  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2721                                              p2i(oops_begin()),
2722                                              p2i(oops_end()),
2723                                              oops_size());
2724  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2725                                              p2i(metadata_begin()),
2726                                              p2i(metadata_end()),
2727                                              metadata_size());
2728  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2729                                              p2i(scopes_data_begin()),
2730                                              p2i(scopes_data_end()),
2731                                              scopes_data_size());
2732  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2733                                              p2i(scopes_pcs_begin()),
2734                                              p2i(scopes_pcs_end()),
2735                                              scopes_pcs_size());
2736  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2737                                              p2i(dependencies_begin()),
2738                                              p2i(dependencies_end()),
2739                                              dependencies_size());
2740  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2741                                              p2i(handler_table_begin()),
2742                                              p2i(handler_table_end()),
2743                                              handler_table_size());
2744  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2745                                              p2i(nul_chk_table_begin()),
2746                                              p2i(nul_chk_table_end()),
2747                                              nul_chk_table_size());
2748}
2749
2750void nmethod::print_code() {
2751  HandleMark hm;
2752  ResourceMark m;
2753  Disassembler::decode(this);
2754}
2755
2756
2757#ifndef PRODUCT
2758
2759void nmethod::print_scopes() {
2760  // Find the first pc desc for all scopes in the code and print it.
2761  ResourceMark rm;
2762  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2763    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
2764      continue;
2765
2766    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
2767    sd->print_on(tty, p);
2768  }
2769}
2770
2771void nmethod::print_dependencies() {
2772  ResourceMark rm;
2773  ttyLocker ttyl;   // keep the following output all in one block
2774  tty->print_cr("Dependencies:");
2775  for (Dependencies::DepStream deps(this); deps.next(); ) {
2776    deps.print_dependency();
2777    Klass* ctxk = deps.context_type();
2778    if (ctxk != NULL) {
2779      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
2780        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
2781      }
2782    }
2783    deps.log_dependency();  // put it into the xml log also
2784  }
2785}
2786
2787
2788void nmethod::print_relocations() {
2789  ResourceMark m;       // in case methods get printed via the debugger
2790  tty->print_cr("relocations:");
2791  RelocIterator iter(this);
2792  iter.print();
2793  if (UseRelocIndex) {
2794    jint* index_end   = (jint*)relocation_end() - 1;
2795    jint  index_size  = *index_end;
2796    jint* index_start = (jint*)( (address)index_end - index_size );
2797    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", p2i(index_start), index_size);
2798    if (index_size > 0) {
2799      jint* ip;
2800      for (ip = index_start; ip+2 <= index_end; ip += 2)
2801        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
2802                      ip[0],
2803                      ip[1],
2804                      p2i(header_end()+ip[0]),
2805                      p2i(relocation_begin()-1+ip[1]));
2806      for (; ip < index_end; ip++)
2807        tty->print_cr("  (%d ?)", ip[0]);
2808      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", p2i(ip), *ip);
2809      ip++;
2810      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", p2i(ip));
2811    }
2812  }
2813}
2814
2815
2816void nmethod::print_pcs() {
2817  ResourceMark m;       // in case methods get printed via debugger
2818  tty->print_cr("pc-bytecode offsets:");
2819  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2820    p->print(this);
2821  }
2822}
2823
2824#endif // PRODUCT
2825
2826const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
2827  RelocIterator iter(this, begin, end);
2828  bool have_one = false;
2829  while (iter.next()) {
2830    have_one = true;
2831    switch (iter.type()) {
2832        case relocInfo::none:                  return "no_reloc";
2833        case relocInfo::oop_type: {
2834          stringStream st;
2835          oop_Relocation* r = iter.oop_reloc();
2836          oop obj = r->oop_value();
2837          st.print("oop(");
2838          if (obj == NULL) st.print("NULL");
2839          else obj->print_value_on(&st);
2840          st.print(")");
2841          return st.as_string();
2842        }
2843        case relocInfo::metadata_type: {
2844          stringStream st;
2845          metadata_Relocation* r = iter.metadata_reloc();
2846          Metadata* obj = r->metadata_value();
2847          st.print("metadata(");
2848          if (obj == NULL) st.print("NULL");
2849          else obj->print_value_on(&st);
2850          st.print(")");
2851          return st.as_string();
2852        }
2853        case relocInfo::runtime_call_type: {
2854          stringStream st;
2855          st.print("runtime_call");
2856          runtime_call_Relocation* r = iter.runtime_call_reloc();
2857          address dest = r->destination();
2858          CodeBlob* cb = CodeCache::find_blob(dest);
2859          if (cb != NULL) {
2860            st.print(" %s", cb->name());
2861          }
2862          return st.as_string();
2863        }
2864        case relocInfo::virtual_call_type:     return "virtual_call";
2865        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
2866        case relocInfo::static_call_type:      return "static_call";
2867        case relocInfo::static_stub_type:      return "static_stub";
2868        case relocInfo::external_word_type:    return "external_word";
2869        case relocInfo::internal_word_type:    return "internal_word";
2870        case relocInfo::section_word_type:     return "section_word";
2871        case relocInfo::poll_type:             return "poll";
2872        case relocInfo::poll_return_type:      return "poll_return";
2873        case relocInfo::type_mask:             return "type_bit_mask";
2874    }
2875  }
2876  return have_one ? "other" : NULL;
2877}
2878
2879// Return the last scope in (begin..end]
2880ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
2881  PcDesc* p = pc_desc_near(begin+1);
2882  if (p != NULL && p->real_pc(this) <= end) {
2883    return new ScopeDesc(this, p->scope_decode_offset(),
2884                         p->obj_decode_offset(), p->should_reexecute(),
2885                         p->return_oop());
2886  }
2887  return NULL;
2888}
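
// A minimal usage sketch (hypothetical driver; mirrors the loop in
// print_code_comment_on() below). The returned ScopeDesc is the innermost
// frame at that pc; the inlining chain is recovered by walking sender():
//
//   ScopeDesc* sd = nm->scope_desc_in(begin, end);
//   for (; sd != NULL; sd = sd->sender()) {
//     if (sd->method() != NULL) {
//       sd->method()->print_short_name(tty);  // innermost to outermost
//       tty->print_cr("@%d", sd->bci());
//     }
//   }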

void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");

  if (has_method_handle_invokes() &&
      block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");

  if (block_begin == consts_begin())            stream->print_cr("[Constants]");

  if (block_begin == entry_point()) {
    methodHandle m = method();
    if (m.not_null()) {
      stream->print("  # ");
      m->print_value_on(stream);
      stream->cr();
    }
    if (m.not_null() && !is_osr_method()) {
      ResourceMark rm;
      int sizeargs = m->size_of_parameters();
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
      {
        int sig_index = 0;
        if (!m->is_static())
          sig_bt[sig_index++] = T_OBJECT; // 'this'
        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
          BasicType t = ss.type();
          sig_bt[sig_index++] = t;
          if (type2size[t] == 2) {
            sig_bt[sig_index++] = T_VOID;
          } else {
            assert(type2size[t] == 1, "size is 1 or 2");
          }
        }
        assert(sig_index == sizeargs, "signature slot count mismatch");
      }
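      // Worked example (illustrative, not part of the original code): for a
      // virtual method with signature (IJ)V the block above yields
      // sizeargs == 4 and
      //   sig_bt = { T_OBJECT /* this */, T_INT, T_LONG, T_VOID }
      // where T_VOID fills the high-half slot of the long, since
      // type2size[T_LONG] == 2.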
      const char* spname = "sp"; // make arch-specific?
      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
      int stack_slot_offset = this->frame_size() * wordSize;
      int tab1 = 14, tab2 = 24;
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      bool did_old_sp = false;
      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
        bool at_this = (arg_index == -1);
        bool at_old_sp = false;
        BasicType t = (at_this ? T_OBJECT : ss.type());
        assert(t == sig_bt[sig_index], "sigs in sync");
        if (at_this)
          stream->print("  # this: ");
        else
          stream->print("  # parm%d: ", arg_index);
        stream->move_to(tab1);
        VMReg fst = regs[sig_index].first();
        VMReg snd = regs[sig_index].second();
        if (fst->is_reg()) {
          stream->print("%s", fst->name());
          if (snd->is_valid()) {
            stream->print(":%s", snd->name());
          }
        } else if (fst->is_stack()) {
          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
          if (offset == stack_slot_offset)  at_old_sp = true;
          stream->print("[%s+0x%x]", spname, offset);
        } else {
          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
        }
        stream->print(" ");
        stream->move_to(tab2);
        stream->print("= ");
        if (at_this) {
          m->method_holder()->print_value_on(stream);
        } else {
          bool did_name = false;
          if (ss.is_object()) {
            Symbol* name = ss.as_symbol_or_null();
            if (name != NULL) {
              name->print_value_on(stream);
              did_name = true;
            }
          }
          if (!did_name)
            stream->print("%s", type2name(t));
        }
        if (at_old_sp) {
          stream->print("  (%s of caller)", spname);
          did_old_sp = true;
        }
        stream->cr();
        sig_index += type2size[t];
        arg_index += 1;
        if (!at_this)  ss.next();
      }
      if (!did_old_sp) {
        stream->print("  # ");
        stream->move_to(tab1);
        stream->print("[%s+0x%x]", spname, stack_slot_offset);
        stream->print("  (%s of caller)", spname);
        stream->cr();
      }
    }
  }
}
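
// Sample of the entry-point annotation printed above (hypothetical
// registers and offsets; actual locations come from
// SharedRuntime::java_calling_convention() on the target platform):
//
//   [Entry Point]
//     # {method} 'indexOf' '(I)I' in 'java/lang/String'
//     # this:     rsi:rsi   = 'java/lang/String'
//     # parm0:    rdx       = int
//     #           [sp+0x20]  (sp of caller)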

void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
  // First, find an oopmap in (begin, end].
  // We use the odd half-closed interval so that oop maps and scope descs
  // which are tied to the byte after a call are printed with the call itself.
  address base = code_begin();
  ImmutableOopMapSet* oms = oop_maps();
  if (oms != NULL) {
    for (int i = 0, imax = oms->count(); i < imax; i++) {
      const ImmutableOopMapPair* pair = oms->pair_at(i);
      const ImmutableOopMap* om = pair->get_from(oms);
      address pc = base + pair->pc_offset();
      if (pc > begin) {
        if (pc <= end) {
          st->move_to(column);
          st->print("; ");
          om->print_on(st);
        }
        break;
      }
    }
  }

  // Print any debug info present at this pc.
  ScopeDesc* sd = scope_desc_in(begin, end);
  if (sd != NULL) {
    st->move_to(column);
    if (sd->bci() == SynchronizationEntryBCI) {
      st->print(";*synchronization entry");
    } else {
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else if (sd->method()->is_native()) {
        st->print("method is native");
      } else {
        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
        st->print(";*%s", Bytecodes::name(bc));
        switch (bc) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          {
            Bytecode_invoke invoke(sd->method(), sd->bci());
            st->print(" ");
            if (invoke.name() != NULL)
              invoke.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
        case Bytecodes::_getstatic:
        case Bytecodes::_putstatic:
          {
            Bytecode_field field(sd->method(), sd->bci());
            st->print(" ");
            if (field.name() != NULL)
              field.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        }
      }
    }

    // Print all scopes, from the innermost frame outwards.
    for (; sd != NULL; sd = sd->sender()) {
      st->move_to(column);
      st->print("; -");
      if (sd->method() == NULL) {
        // A scope may have no method; do not ask it for a line number.
        st->print("method is NULL");
        st->print("@%d", sd->bci());
      } else {
        sd->method()->print_short_name(st);
        int lineno = sd->method()->line_number_from_bci(sd->bci());
        if (lineno != -1) {
          st->print("@%d (line %d)", sd->bci(), lineno);
        } else {
          st->print("@%d", sd->bci());
        }
      }
      st->cr();
    }
  }

  // Print relocation information
  const char* str = reloc_string_for(begin, end);
  if (str != NULL) {
    if (sd != NULL) st->cr();
    st->move_to(column);
    st->print(";   {%s}", str);
  }
  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
  if (cont_offset != 0) {
    st->move_to(column);
    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
  }
}
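
// Putting it together (illustrative, with a hypothetical method and line
// number): a single call site in an annotated listing can carry all three
// kinds of comments emitted above:
//
//   ;*invokevirtual hashCode
//   ; - java.lang.String::hashCode@5 (line 1452)
//   ;   {optimized virtual_call}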

#ifndef PRODUCT

void nmethod::print_value_on(outputStream* st) const {
  st->print("nmethod");
  print_on(st, NULL);
}

void nmethod::print_calls(outputStream* st) {
  RelocIterator iter(this);
  while (iter.next()) {
    switch (iter.type()) {
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type: {
      VerifyMutexLocker mc(CompiledIC_lock);
      CompiledIC_at(&iter)->print();
      break;
    }
    case relocInfo::static_call_type:
      st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
      compiledStaticCall_at(iter.reloc())->print();
      break;
    }
  }
}

void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print();
}

void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(code_begin());
}

void nmethod::print_statistics() {
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
  nmethod_stats.print_native_nmethod_stats();
  nmethod_stats.print_nmethod_stats();
  DebugInformationRecorder::print_statistics();
  nmethod_stats.print_pc_stats();
  Dependencies::print_statistics();
  if (xtty != NULL)  xtty->tail("statistics");
}

#endif // PRODUCT
