nmethod.cpp revision 7837:9c3b4e28183c
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/bytecode.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef SHARK
#include "shark/sharkCompiler.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

unsigned char nmethod::_global_unloading_clock = 0;

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif

bool nmethod::is_compiled_by_c1() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_c2();
}
bool nmethod::is_compiled_by_shark() const {
  if (compiler() == NULL) {
    return false;
  }
  return compiler()->is_shark();
}


//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, these stats, like the others, are printed only to the log.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
static
struct nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
  int oops_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
  }
  void print_nmethod_stats() {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
  }

  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
  }

  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of those which have approximate true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
} nmethod_stats;
#endif //PRODUCT


//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;

  add_address_and_handler(pc, handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(), "Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}


address ExceptionCache::test_address(address addr) {
  for (int i = 0; i < count(); i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}


bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;
  if (count() < cache_size) {
    set_pc_at(count(), addr);
    set_handler_at(count(), handler);
    increment_count();
    return true;
  }
  return false;
}


// Private method for handling the exception cache; used to manipulate
// the cache entries directly.
ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}


//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
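
// Worked example of the approximate match above (the offsets are
// illustrative only): if the sorted PcDescs carry pc_offsets
// {-1 (sentinel), 4, 12}, an exact query for pc_offset == 9 fails, while an
// approximate query for 9 matches the PcDesc with offset 12, because
// 4 < 9 && 9 <= 12. Approximate matching thus selects the first PcDesc at
// or after the requested offset.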

void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions, do not load cache elements
  // repeatedly; use a local copy instead:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++) {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}
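
// Illustrative trace of the shift above, assuming cache_size == 4 for the
// example: starting from [A, B, C, D], adding E yields [E, A, B, C] and the
// oldest entry D falls off the end. Re-adding an element that is already
// cached is not special-cased, so a PcDesc can briefly occupy two slots;
// that is harmless, since lookups stop at the first match.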

// Adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is).
static int adjust_pcs_size(int pcs_size) {
  int nsize = round_to(pcs_size, oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
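
// Worked example (the sizes are assumed for illustration, not guaranteed on
// any platform): with oopSize == 8 and sizeof(PcDesc) == 12, a pcs_size of
// 36 first rounds up to 40, which is not a multiple of 12, so the result
// becomes 36 + 12 == 48, a multiple of both 8 and 12, satisfying the
// alignment assert above.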

//-----------------------------------------------------------------------------


void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  if (exception_cache() != NULL) {
    new_entry->set_next(exception_cache());
  }
  set_exception_cache(new_entry);
}

void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public method for accessing the exception cache.
address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}


void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
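
// Illustrative interleaving of the unlocked read path with the locked write
// path above (threads T1 and T2; the entry names are hypothetical):
//   T1: ec = exception_cache();         // reads head entry A
//   T2: add_exception_cache_entry(B);   // under the lock; head is now B -> A
//   T1: walks A -> ... -> NULL          // misses B: a benign false negative
// Because writers only ever prepend a fully constructed entry, a reader
// that loaded the old head still traverses a consistent, if shorter, list.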


//-------------end of code for ExceptionCache--------------


int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = in_use;
  _unloading_clock            = 0;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _marked_for_deoptimization  = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _unload_reported            = false;           // jvmti state

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _jmethod_id              = NULL;
  _osr_link                = NULL;
  if (UseG1GC) {
    _unloading_next        = NULL;
  } else {
    _scavenge_root_link    = NULL;
  }
  _scavenge_root_state     = 0;
  _compiler                = NULL;
#if INCLUDE_RTM_OPT
  _rtm_state               = NoRTM;
#endif
}

nmethod* nmethod::new_native_nmethod(methodHandle method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                            compile_id, &offsets,
                                            code_buffer, frame_size,
                                            basic_lock_owner_sp_offset,
                                            basic_lock_sp_offset, oop_maps);
    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
    if (PrintAssembly && nm != NULL) {
      Disassembler::decode(nm);
    }
  }
  // verify nmethod
  debug_only(if (nm) nm->verify();) // might block

  if (nm != NULL) {
    nm->log_new_nmethod();
  }

  return nm;
}

nmethod* nmethod::new_nmethod(methodHandle method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int nmethod_size =
      allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + round_to(dependencies->size_in_bytes() , oopSize)
      + round_to(handler_table->size_in_bytes(), oopSize)
      + round_to(nul_chk_table->size_in_bytes(), oopSize)
      + round_to(debug_info->data_size()       , oopSize);

    nm = new (nmethod_size, comp_level)
    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level);

    if (nm != NULL) {
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies, which makes it linear in
      // the number of methods compiled.  For applications with a lot of
      // classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        Klass* klass = deps.context_type();
        if (klass == NULL) {
          continue;  // ignore things like evol_method
        }

        // record this nmethod as dependent on this klass
        InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
      }
      NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}


// For native wrappers
nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry.
    _exception_offset        = 0;
    _deoptimize_offset       = 0;
    _deoptimize_mh_offset    = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = _scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _handler_table_offset    = _dependencies_offset;
    _nul_chk_table_offset    = _handler_table_offset;
    _nmethod_end_offset      = _nul_chk_table_offset;
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    code_buffer->copy_values_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());
    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // print the header part first
    print();
    // then print the requested information
    if (PrintNativeNMethods) {
      print_code();
      if (oop_maps != NULL) {
        oop_maps->print();
      }
    }
    if (PrintRelocations) {
      print_relocations();
    }
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}

nmethod::nmethod(
  Method* method,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level
  )
  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(No_Safepoint_Verifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _method                  = method;
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _compiler                = compiler;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
    } else {
      _deoptimize_mh_offset  = -1;
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);

    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _pc_desc_cache.reset_to(scopes_pcs_begin());

    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (ScavengeRootsInCode) {
      if (detect_scavenge_root_oops()) {
        CodeCache::add_scavenge_root_nmethod(this);
      }
      Universe::heap()->register_nmethod(this);
    }
    debug_only(verify_scavenge_root_oops());

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

    // We use the entry point information to determine whether a method
    // is static or non-static.
    assert(compiler->is_c2() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }

  bool printnmethods = PrintNMethods
    || CompilerOracle::should_print(_method)
    || CompilerOracle::has_option_string(_method, "PrintNMethods");
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}


// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  if (compiler() != NULL) {
    log->print(" compiler='%s'", compiler()->name());
  }
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
}


#define LOG_OFFSET(log, name)                            \
  if ((intptr_t)name##_end() - (intptr_t)name##_begin()) \
    log->print(" " XSTR(name) "_offset='%d'",            \
               (intptr_t)name##_begin() - (intptr_t)this)


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    HandleMark hm;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size());
    xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this);

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", this);
    } else {
      CompileTask::print_compilation(st, this, msg, /*short_form:*/ false);
    }
  }
}


void nmethod::print_nmethod(bool printmethod) {
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    xtty->stamp();
    xtty->end_head();
  }
  // print the header part first
  print();
  // then print the requested information
  if (printmethod) {
    print_code();
    print_pcs();
    if (oop_maps()) {
      oop_maps()->print();
    }
  }
  if (PrintDebugInfo) {
    print_scopes();
  }
  if (PrintRelocations) {
    print_relocations();
  }
  if (PrintDependencies) {
    print_dependencies();
  }
  if (PrintExceptionHandlers) {
    print_handler_table();
    print_nul_chk_table();
  }
  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    (*dest) = (oop) handle;
  } else {
    (*dest) = JNIHandles::resolve_non_null(handle);
  }
}
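
// Example of the special case above: inline-cache slots are seeded with
// Universe::non_oop_word() (the 1 or -1 mentioned in the comment), which is
// not a real JNI handle and therefore must be copied through unresolved;
// only genuine jobject handles go through JNIHandles::resolve_non_null().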


// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0; index < length; index++) {
    dest[index] = array->at(index);
  }
}

bool nmethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool nmethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}


void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}


void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void nmethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}


void nmethod::cleanup_inline_caches() {

  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to zombie methods
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Clean inline caches pointing to both zombie and not_entrant methods
          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
        }
        break;
      }
    }
  }
}

void nmethod::verify_clean_inline_caches() {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_nmethod()) {
          nmethod* nm = (nmethod*)cb;
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
    }
  }
}

int nmethod::verify_icholder_relocations() {
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_not_entrant_be_converted() {
  assert(is_not_entrant(), "must be a non-entrant method");

  // Since the nmethod sweeper only does partial sweeps, the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm();
}
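
// Example with illustrative counts: if this nmethod was last seen on a
// stack during sweeper traversal 5 (stack_traversal_mark() == 5), the
// condition above first holds once NMethodSweeper::traversal_count()
// reaches 7, i.e. after two further sweeps, matching the "2 cleaning
// passes" promise in mark_as_seen_on_stack().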

void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}

void nmethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void nmethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char nmethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
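
// Illustrative wraparound of the clock above: when _global_unloading_clock
// overflows from 255 back to 0, increase_unloading_clock() bumps it straight
// to 1, so a freshly allocated nmethod (whose _unloading_clock starts at 0)
// can never be mistaken for one already visited in the current pass.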

void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {

  post_compiled_method_unload();

  // Since this nmethod is being unloaded, make sure that dependencies
  // recorded in instanceKlasses get flushed and pass non-NULL closure to
  // indicate that this work is being done during a GC.
  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
  assert(is_alive != NULL, "Should be non-NULL");
  // A non-NULL is_alive closure indicates that this is being called during GC.
  flush_dependencies(is_alive);

  // Break cycle between nmethod & method
  if (TraceClassUnloading && WizardMode) {
    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
                  " unloadable], Method*(" INTPTR_FORMAT
                  "), cause(" INTPTR_FORMAT ")",
                  this, (address)_method, (address)cause);
    if (!Universe::heap()->is_gc_active())
      cause->klass()->print();
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
  }
  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
  // it is pointing to some oop (other than the Method*) being unloaded.
  if (_method != NULL) {
    // OSR methods point to the Method*, but the Method* does not
    // point back!
    if (_method->code() == this) {
      _method->clear_code(); // Break a cycle
    }
    _method = NULL;            // Clear the method of this dead nmethod
  }
  // Make the class unloaded - i.e., change state and notify sweeper
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (is_in_use()) {
    // Transitioning directly from live to unloaded -- so
    // we need to force a cache clean-up; remember this
    // for later on.
    CodeCache::set_needs_cache_clean(true);
  }

  // Unregister must be done before the state change
  Universe::heap()->unregister_nmethod(this);

  _state = unloaded;

  // Log the unloading.
  log_state_change();

  // The Method* is gone at this point
  assert(_method == NULL, "Tautology");

  set_osr_link(NULL);
  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
  NMethodSweeper::report_state_change(this);
}

void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
    method()->method_holder()->remove_osr_nmethod(this);
}

void nmethod::log_state_change() const {
  if (LogCompilation) {
    if (xtty != NULL) {
      ttyLocker ttyl;  // keep the following output all in one block
      if (_state == unloaded) {
        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
                         os::current_thread_id());
      } else {
        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
                         os::current_thread_id(),
                         (_state == zombie ? " zombie='1'" : ""));
      }
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
    }
  }
  if (PrintCompilation && _state != unloaded) {
    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
  }
}

/**
 * Common functionality for both make_not_entrant and make_zombie
 */
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
  assert(!is_zombie(), "should not already be a zombie");

  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
  nmethodLocker nml(this);
  methodHandle the_method(method());
  No_Safepoint_Verifier nsv;

  // During patching, depending on the nmethod state we must notify the GC that
  // code has been unloaded, unregistering it. We cannot do this while
  // holding the Patching_lock because we need to use the CodeCache_lock. This
  // would be prone to deadlocks.
  // This flag is used to remember whether we need to later lock and unregister.
  bool nmethod_needs_unregister = false;

  {
    // Invalidate the osr nmethod before acquiring the patching lock since
    // they both acquire leaf locks and we don't want a deadlock.
    // This logic is equivalent to the logic below for patching the
    // verified entry point of regular methods.
    if (is_osr_method()) {
      // this effectively makes the osr nmethod not entrant
      invalidate_osr_method();
    }

    // Enter critical section.  Does not block for safepoint.
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);

    if (_state == state) {
      // another thread already performed this transition so nothing
      // to do, but return false to indicate this.
      return false;
    }

    // The caller can be calling the method statically or through an inline
    // cache call.
    if (!is_osr_method() && !is_not_entrant()) {
      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                  SharedRuntime::get_handle_wrong_method_stub());
    }

    if (is_in_use()) {
      // It's a true state change, so mark the method as decompiled.
      // Do it only for transition from alive.
      inc_decompile_count();
    }

    // If the state is becoming a zombie, signal to unregister the nmethod with
    // the heap.
    // This nmethod may have already been unloaded during a full GC.
    if ((state == zombie) && !is_unloaded()) {
      nmethod_needs_unregister = true;
    }

    // Must happen before state change. Otherwise we have a race condition in
    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
    // transition its state from 'not_entrant' to 'zombie' without having to wait
    // for stack scanning.
    if (state == not_entrant) {
      mark_as_seen_on_stack();
      OrderAccess::storestore();
    }

    // Change state
    _state = state;

    // Log the transition once
    log_state_change();

    // Remove nmethod from method.
    // We need to check if both the _code and _from_compiled_code_entry_point
    // refer to this nmethod because there is a race in setting these two fields
    // in Method* as seen in bugid 4947125.
    // If the vep() points to the zombie nmethod, the memory for the nmethod
    // could be flushed and the compiler and vtable stubs could still call
    // through it.
    if (method() != NULL && (method()->code() == this ||
                             method()->from_compiled_entry() == verified_entry_point())) {
      HandleMark hm;
      method()->clear_code();
    }
  } // leave critical region under Patching_lock

  // When the nmethod becomes zombie it is no longer alive so the
  // dependencies must be flushed.  nmethods in the not_entrant
  // state will be flushed later when the transition to zombie
  // happens or they get unloaded.
  if (state == zombie) {
    {
      // Flushing dependencies must be done before any possible
      // safepoint can sneak in, otherwise the oops used by the
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (nmethod_needs_unregister) {
        Universe::heap()->unregister_nmethod(this);
      }
      flush_dependencies(NULL);
    }

    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
    // event and it hasn't already been reported for this nmethod then
    // report it now. The event may have been reported earlier if the GC
    // marked it for unloading. JvmtiDeferredEventQueue support means
    // we no longer go to a safepoint here.
    post_compiled_method_unload();

#ifdef ASSERT
    // It's no longer safe to access the oops section since zombie
    // nmethods aren't scanned for GC.
    _oops_are_stale = true;
#endif
    // the Method may be reclaimed by class unloading now that the
    // nmethod is in zombie state
    set_method(NULL);
  } else {
    assert(state == not_entrant, "other cases may need to be handled differently");
  }

  if (TraceCreateZombies) {
    tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
  }

  NMethodSweeper::report_state_change(this);
  return true;
}
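
// Sketch of the lifecycle handled above (the authoritative states are the
// _state values used in this file): an nmethod starts in_use, is patched to
// not_entrant once it must no longer be entered, becomes a zombie after the
// sweeper has seen no activations for two traversals, and is finally
// flushed from the code cache. make_unloaded() provides the separate
// alive -> unloaded path taken when the GC finds one of its oops dead.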

void nmethod::flush() {
  // Note that there are no valid oops in the nmethod anymore.
  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

  assert(!is_locked_by_vm(), "locked methods shouldn't be flushed");
  assert_locked_or_safepoint(CodeCache_lock);

  // completely deallocate this method
  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
  if (PrintMethodFlushing) {
    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
  }

  // We need to deallocate any ExceptionCache data.
  // Note that we do not need to grab the nmethod lock for this, it
  // better be thread safe if we're disposing of it!
  ExceptionCache* ec = exception_cache();
  set_exception_cache(NULL);
  while (ec != NULL) {
    ExceptionCache* next = ec->next();
    delete ec;
    ec = next;
  }

  if (on_scavenge_root_list()) {
    CodeCache::drop_scavenge_root_nmethod(this);
  }

#ifdef SHARK
  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
#endif // SHARK

  ((CodeBlob*)(this))->flush();

  CodeCache::free(this);
}

//
// Notify all classes this nmethod is dependent on that it is no
// longer dependent. This should only be called in two situations.
// First, when an nmethod transitions to a zombie all dependents need
// to be cleared.  Since zombification happens at a safepoint there are no
// synchronization issues.  The second place is a little more tricky.
// During phase 1 of mark sweep class unloading may happen and as a
// result some nmethods may get unloaded.  In this case the flushing
// of dependencies must happen during phase 1 since after GC any
// dependencies in the unloaded nmethod won't be updated, so
// traversing the dependency information is unsafe.  In that case this
// function is called with a non-NULL argument and it only
// notifies instanceKlasses that are reachable.
1458
1459void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1460  assert_locked_or_safepoint(CodeCache_lock);
1461  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1462  "is_alive is non-NULL if and only if we are called during GC");
1463  if (!has_flushed_dependencies()) {
1464    set_has_flushed_dependencies();
1465    for (Dependencies::DepStream deps(this); deps.next(); ) {
1466      Klass* klass = deps.context_type();
1467      if (klass == NULL)  continue;  // ignore things like evol_method
1468
1469      // During GC the is_alive closure is non-NULL, and is used to
1470      // determine liveness of dependees that need to be updated.
1471      if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1472        InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1473      }
1474    }
1475  }
1476}
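
// Editor's sketch (illustrative only, not VM code): the two calling contexts
// described in the comment above. The driver functions are hypothetical;
// only the flush_dependencies() calls mirror real usage in this file.
#if 0
static void example_zombie_transition(nmethod* nm) {
  // At a safepoint, outside of GC: every recorded context type is notified.
  nm->flush_dependencies(NULL);
}
static void example_gc_phase1(nmethod* nm, BoolObjectClosure* is_alive) {
  // During GC: only InstanceKlasses whose loaders are still alive are
  // notified; dead ones are about to go away and must not be touched.
  nm->flush_dependencies(is_alive);
}
#endif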
1477
1478
1479// If this oop is not live, the nmethod can be unloaded.
1480bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1481  assert(root != NULL, "just checking");
1482  oop obj = *root;
1483  if (obj == NULL || is_alive->do_object_b(obj)) {
1484      return false;
1485  }
1486
1487  // If ScavengeRootsInCode is true, an nmethod might be unloaded
1488  // simply because one of its constant oops has gone dead.
1489  // No actual classes need to be unloaded in order for this to occur.
1490  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1491  make_unloaded(is_alive, obj);
1492  return true;
1493}
1494
1495// ------------------------------------------------------------------
1496// post_compiled_method_load_event
1497// new method for install_code() path
1498// Transfer information from compilation to jvmti
1499void nmethod::post_compiled_method_load_event() {
1500
1501  Method* moop = method();
1502  HOTSPOT_COMPILED_METHOD_LOAD(
1503      (char *) moop->klass_name()->bytes(),
1504      moop->klass_name()->utf8_length(),
1505      (char *) moop->name()->bytes(),
1506      moop->name()->utf8_length(),
1507      (char *) moop->signature()->bytes(),
1508      moop->signature()->utf8_length(),
1509      insts_begin(), insts_size());
1510
1511  if (JvmtiExport::should_post_compiled_method_load() ||
1512      JvmtiExport::should_post_compiled_method_unload()) {
1513    get_and_cache_jmethod_id();
1514  }
1515
1516  if (JvmtiExport::should_post_compiled_method_load()) {
1517    // Let the Service thread (which is a real Java thread) post the event
1518    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1519    JvmtiDeferredEventQueue::enqueue(
1520      JvmtiDeferredEvent::compiled_method_load_event(this));
1521  }
1522}
1523
1524jmethodID nmethod::get_and_cache_jmethod_id() {
1525  if (_jmethod_id == NULL) {
1526    // Cache the jmethod_id since it can no longer be looked up once the
1527    // method itself has been marked for unloading.
1528    _jmethod_id = method()->jmethod_id();
1529  }
1530  return _jmethod_id;
1531}
1532
1533void nmethod::post_compiled_method_unload() {
1534  if (unload_reported()) {
1535    // During unloading we transition to unloaded and then to zombie
1536    // and the unloading is reported during the first transition.
1537    return;
1538  }
1539
1540  assert(_method != NULL && !is_unloaded(), "just checking");
1541  DTRACE_METHOD_UNLOAD_PROBE(method());
1542
1543  // If a JVMTI agent has enabled the CompiledMethodUnload event then
1544  // post the event. Sometime later this nmethod will be made a zombie
1545  // by the sweeper but the Method* will not be valid at that point.
1546  // If the _jmethod_id is null then no load event was ever requested
1547  // so don't bother posting the unload.  The main reason for this is
1548  // that the jmethodID is a weak reference to the Method* so if
1549  // it's being unloaded there's no way to look it up since the weak
1550  // ref will have been cleared.
1551  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1552    assert(!unload_reported(), "already unloaded");
1553    JvmtiDeferredEvent event =
1554      JvmtiDeferredEvent::compiled_method_unload_event(this,
1555          _jmethod_id, insts_begin());
1556    if (SafepointSynchronize::is_at_safepoint()) {
1557      // Don't want to take the queueing lock. Add it as pending and
1558      // it will get enqueued later.
1559      JvmtiDeferredEventQueue::add_pending_event(event);
1560    } else {
1561      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1562      JvmtiDeferredEventQueue::enqueue(event);
1563    }
1564  }
1565
1566  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1567  // any time. As the nmethod is being unloaded now, we mark it as
1568  // having the unload event reported - this will ensure that we don't
1569  // attempt to report the event in the unlikely scenario where the
1570  // event is enabled at the time the nmethod is made a zombie.
1571  set_unload_reported();
1572}
1573
1574static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
1575  if (ic->is_icholder_call()) {
1576    // The only exception is CompiledICHolder metadata, which may
1577    // still prove to be alive; we check its holders just below.
1578    CompiledICHolder* cichk_oop = ic->cached_icholder();
1579
1580    if (mark_on_stack) {
1581      Metadata::mark_on_stack(cichk_oop->holder_method());
1582      Metadata::mark_on_stack(cichk_oop->holder_klass());
1583    }
1584
1585    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1586        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1587      return;
1588    }
1589  } else {
1590    Metadata* ic_oop = ic->cached_metadata();
1591    if (ic_oop != NULL) {
1592      if (mark_on_stack) {
1593        Metadata::mark_on_stack(ic_oop);
1594      }
1595
1596      if (ic_oop->is_klass()) {
1597        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1598          return;
1599        }
1600      } else if (ic_oop->is_method()) {
1601        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1602          return;
1603        }
1604      } else {
1605        ShouldNotReachHere();
1606      }
1607    }
1608  }
1609
1610  ic->set_to_clean();
1611}
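
// Editor's summary (descriptive, added for clarity): the IC is left intact
// only while every piece of metadata it caches is loader-alive --
//   icholder call: holder_method's holder alive AND holder_klass alive
//   Klass cache:   that Klass's loader alive
//   Method cache:  that Method's holder alive
// Everything else falls through to ic->set_to_clean().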
1612
1613// This is called at the end of the strong tracing/marking phase of a
1614// GC to unload an nmethod if it contains otherwise unreachable
1615// oops.
1616
1617void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1618  // Make sure the oops are ready to receive visitors
1619  assert(!is_zombie() && !is_unloaded(),
1620         "should not call follow on zombie or unloaded nmethod");
1621
1622  // If the method is not entrant then a JMP is plastered over the
1623  // first few bytes.  If an oop in the old code was there, that oop
1624  // should not get GC'd.  Skip the first few bytes of oops on
1625  // not-entrant methods.
1626  address low_boundary = verified_entry_point();
1627  if (is_not_entrant()) {
1628    low_boundary += NativeJump::instruction_size;
1629    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1630    // (See comment above.)
1631  }
1632
1633  // The RedefineClasses() API can cause the class unloading invariant
1634  // to no longer be true. See jvmtiExport.hpp for details.
1635  // Also, leave a debugging breadcrumb in local flag.
1636  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1637  if (a_class_was_redefined) {
1638    // This set of the unloading_occurred flag is done before the
1639    // call to post_compiled_method_unload() so that the unloading
1640    // of this nmethod is reported.
1641    unloading_occurred = true;
1642  }
1643
1644  // Exception cache
1645  clean_exception_cache(is_alive);
1646
1647  // If class unloading occurred we first iterate over all inline caches and
1648  // clear ICs where the cached oop is referring to an unloaded klass or method.
1649  // The remaining live cached oops will be traversed in the relocInfo::oop_type
1650  // iteration below.
1651  if (unloading_occurred) {
1652    RelocIterator iter(this, low_boundary);
1653    while (iter.next()) {
1654      if (iter.type() == relocInfo::virtual_call_type) {
1655        CompiledIC *ic = CompiledIC_at(&iter);
1656        clean_ic_if_metadata_is_dead(ic, is_alive, false);
1657      }
1658    }
1659  }
1660
1661  // Compiled code
1662  {
1663    RelocIterator iter(this, low_boundary);
1664    while (iter.next()) {
1665      if (iter.type() == relocInfo::oop_type) {
1666        oop_Relocation* r = iter.oop_reloc();
1667        // In this loop, we must only traverse those oops directly embedded in
1668        // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1669        assert(1 == (r->oop_is_immediate()) +
1670                    (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1671               "oop must be found in exactly one place");
1672        if (r->oop_is_immediate() && r->oop_value() != NULL) {
1673          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1674            return;
1675          }
1676        }
1677      }
1678    }
1679  }
1680
1681
1682  // Scopes
1683  for (oop* p = oops_begin(); p < oops_end(); p++) {
1684    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1685    if (can_unload(is_alive, p, unloading_occurred)) {
1686      return;
1687    }
1688  }
1689
1690  // Ensure that all metadata is still alive
1691  verify_metadata_loaders(low_boundary, is_alive);
1692}
1693
1694template <class CompiledICorStaticCall>
1695static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
1696  // OK to look up references to zombies here
1697  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
1698  if (cb != NULL && cb->is_nmethod()) {
1699    nmethod* nm = (nmethod*)cb;
1700
1701    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
1702      // The nmethod has not been processed yet.
1703      return true;
1704    }
1705
1706    // Clean inline caches pointing to both zombie and not_entrant methods
1707    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1708      ic->set_to_clean();
1709      assert(ic->is_clean(), err_msg("nmethod " PTR_FORMAT " not clean %s", from, from->method()->name_and_sig_as_C_string()));
1710    }
1711  }
1712
1713  return false;
1714}
1715
1716static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1717  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1718}
1719
1720static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1721  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1722}
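
// Editor's sketch (hypothetical driver, not VM code): how the template and
// its two wrappers above are typically exercised while walking one nmethod's
// relocations; the real call sites are in do_unloading_parallel* below.
#if 0
static void example_clean_calls(nmethod* nm, BoolObjectClosure* is_alive) {
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type ||
        iter.type() == relocInfo::opt_virtual_call_type) {
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, nm);
    } else if (iter.type() == relocInfo::static_call_type) {
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, nm);
    }
  }
}
#endif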
1723
1724bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
1725  assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
1726
1727  oop_Relocation* r = iter_at_oop->oop_reloc();
1728  // Traverse those oops directly embedded in the code.
1729  // Other oops (oop_index>0) are seen as part of scopes_oops.
1730  assert(1 == (r->oop_is_immediate()) +
1731         (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1732         "oop must be found in exactly one place");
1733  if (r->oop_is_immediate() && r->oop_value() != NULL) {
1734    // Unload this nmethod if the oop is dead.
1735    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1736      return true;
1737    }
1738  }
1739
1740  return false;
1741}
1742
1743void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
1744  assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
1745
1746  metadata_Relocation* r = iter_at_metadata->metadata_reloc();
1747  // In this loop, we must only follow metadata directly embedded in
1748  // the code.  Other metadata (index > 0) is seen as part of
1749  // the metadata section below.
1750  assert(1 == (r->metadata_is_immediate()) +
1751         (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
1752         "metadata must be found in exactly one place");
1753  if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
1754    Metadata* md = r->metadata_value();
1755    if (md != _method) Metadata::mark_on_stack(md);
1756  }
1757}
1758
1759void nmethod::mark_metadata_on_stack_non_relocs() {
1760  // Visit the metadata section
1761  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
1762    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
1763    Metadata* md = *p;
1764    Metadata::mark_on_stack(md);
1765  }
1766
1767  // Visit metadata not embedded in the other places.
1768  if (_method != NULL) Metadata::mark_on_stack(_method);
1769}
1770
1771bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1772  ResourceMark rm;
1773
1774  // Make sure the oops are ready to receive visitors
1775  assert(!is_zombie() && !is_unloaded(),
1776         "should not call follow on zombie or unloaded nmethod");
1777
1778  // If the method is not entrant then a JMP is plastered over the
1779  // first few bytes.  If an oop in the old code was there, that oop
1780  // should not get GC'd.  Skip the first few bytes of oops on
1781  // not-entrant methods.
1782  address low_boundary = verified_entry_point();
1783  if (is_not_entrant()) {
1784    low_boundary += NativeJump::instruction_size;
1785    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1786    // (See comment above.)
1787  }
1788
1789  // The RedefineClasses() API can cause the class unloading invariant
1790  // to no longer be true. See jvmtiExport.hpp for details.
1791  // Also, leave a debugging breadcrumb in local flag.
1792  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
1793  if (a_class_was_redefined) {
1794    // This set of the unloading_occurred flag is done before the
1795    // call to post_compiled_method_unload() so that the unloading
1796    // of this nmethod is reported.
1797    unloading_occurred = true;
1798  }
1799
1800  // When class redefinition is used all metadata in the CodeCache has to be recorded,
1801  // so that unused "previous versions" can be purged. Since walking the CodeCache can
1802  // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
1803  bool mark_metadata_on_stack = a_class_was_redefined;
1804
1805  // Exception cache
1806  clean_exception_cache(is_alive);
1807
1808  bool is_unloaded = false;
1809  bool postponed = false;
1810
1811  RelocIterator iter(this, low_boundary);
1812  while (iter.next()) {
1813
1814    switch (iter.type()) {
1815
1816    case relocInfo::virtual_call_type:
1817      if (unloading_occurred) {
1818        // If class unloading occurred we first iterate over all inline caches and
1819        // clear ICs where the cached oop is referring to an unloaded klass or method.
1820        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
1821      }
1822
1823      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1824      break;
1825
1826    case relocInfo::opt_virtual_call_type:
1827      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1828      break;
1829
1830    case relocInfo::static_call_type:
1831      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1832      break;
1833
1834    case relocInfo::oop_type:
1835      if (!is_unloaded) {
1836        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
1837      }
1838      break;
1839
1840    case relocInfo::metadata_type:
1841      if (mark_metadata_on_stack) {
1842        mark_metadata_on_stack_at(&iter);
1843      }
1844    }
1845  }
1846
1847  if (mark_metadata_on_stack) {
1848    mark_metadata_on_stack_non_relocs();
1849  }
1850
1851  if (is_unloaded) {
1852    return postponed;
1853  }
1854
1855  // Scopes
1856  for (oop* p = oops_begin(); p < oops_end(); p++) {
1857    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1858    if (can_unload(is_alive, p, unloading_occurred)) {
1859      is_unloaded = true;
1860      break;
1861    }
1862  }
1863
1864  if (is_unloaded) {
1865    return postponed;
1866  }
1867
1868  // Ensure that all metadata is still alive
1869  verify_metadata_loaders(low_boundary, is_alive);
1870
1871  return postponed;
1872}
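
// Editor's sketch (simplified, hypothetical GC-worker driver): phase one may
// report postponed work when it races with an nmethod whose unloading clock
// has not ticked yet; such nmethods get a second, postponed pass once every
// worker has finished phase one.
#if 0
static void example_worker(nmethod* nm, BoolObjectClosure* is_alive,
                           bool unloading_occurred) {
  bool postponed = nm->do_unloading_parallel(is_alive, unloading_occurred);
  // ... global synchronization point: all workers finish phase one ...
  if (postponed) {
    nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
  }
}
#endif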
1873
1874void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
1875  ResourceMark rm;
1876
1877  // Make sure the oops are ready to receive visitors
1878  assert(!is_zombie(),
1879         "should not call follow on zombie nmethod");
1880
1881  // If the method is not entrant then a JMP is plastered over the
1882  // first few bytes.  If an oop in the old code was there, that oop
1883  // should not get GC'd.  Skip the first few bytes of oops on
1884  // not-entrant methods.
1885  address low_boundary = verified_entry_point();
1886  if (is_not_entrant()) {
1887    low_boundary += NativeJump::instruction_size;
1888    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1889    // (See comment above.)
1890  }
1891
1892  RelocIterator iter(this, low_boundary);
1893  while (iter.next()) {
1894
1895    switch (iter.type()) {
1896
1897    case relocInfo::virtual_call_type:
1898      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1899      break;
1900
1901    case relocInfo::opt_virtual_call_type:
1902      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1903      break;
1904
1905    case relocInfo::static_call_type:
1906      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1907      break;
1908    }
1909  }
1910}
1911
1912#ifdef ASSERT
1913
1914class CheckClass : AllStatic {
1915  static BoolObjectClosure* _is_alive;
1916
1917  // Check class_loader is alive for this bit of metadata.
1918  static void check_class(Metadata* md) {
1919    Klass* klass = NULL;
1920    if (md->is_klass()) {
1921      klass = ((Klass*)md);
1922    } else if (md->is_method()) {
1923      klass = ((Method*)md)->method_holder();
1924    } else if (md->is_methodData()) {
1925      klass = ((MethodData*)md)->method()->method_holder();
1926    } else {
1927      md->print();
1928      ShouldNotReachHere();
1929    }
1930    assert(klass->is_loader_alive(_is_alive), "must be alive");
1931  }
1932 public:
1933  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
1934    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
1935    _is_alive = is_alive;
1936    nm->metadata_do(check_class);
1937  }
1938};
1939
1940// This is called during a safepoint so can use static data
1941BoolObjectClosure* CheckClass::_is_alive = NULL;
1942#endif // ASSERT
1943
1944
1945// Processing of oop references should have been sufficient to keep
1946// all strong references alive.  Any weak references should have been
1947// cleared as well.  Visit all the metadata and ensure that it's
1948// really alive.
1949void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
1950#ifdef ASSERT
1951  RelocIterator iter(this, low_boundary);
1952  while (iter.next()) {
1953    // static_stub_Relocations may have dangling references to
1954    // Method*s so trim them out here.  Otherwise it looks like
1955    // compiled code is maintaining a link to dead metadata.
1956    address static_call_addr = NULL;
1957    if (iter.type() == relocInfo::opt_virtual_call_type) {
1958      CompiledIC* cic = CompiledIC_at(&iter);
1959      if (!cic->is_call_to_interpreted()) {
1960        static_call_addr = iter.addr();
1961      }
1962    } else if (iter.type() == relocInfo::static_call_type) {
1963      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
1964      if (!csc->is_call_to_interpreted()) {
1965        static_call_addr = iter.addr();
1966      }
1967    }
1968    if (static_call_addr != NULL) {
1969      RelocIterator sciter(this, low_boundary);
1970      while (sciter.next()) {
1971        if (sciter.type() == relocInfo::static_stub_type &&
1972            sciter.static_stub_reloc()->static_call() == static_call_addr) {
1973          sciter.static_stub_reloc()->clear_inline_cache();
1974        }
1975      }
1976    }
1977  }
1978  // Check that the metadata embedded in the nmethod is alive
1979  CheckClass::do_check_class(is_alive, this);
1980#endif
1981}
1982
1983
1984// Iterate over metadata calling this function.   Used by RedefineClasses
1985void nmethod::metadata_do(void f(Metadata*)) {
1986  address low_boundary = verified_entry_point();
1987  if (is_not_entrant()) {
1988    low_boundary += NativeJump::instruction_size;
1989    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1990    // (See comment above.)
1991  }
1992  {
1993    // Visit all immediate references that are embedded in the instruction stream.
1994    RelocIterator iter(this, low_boundary);
1995    while (iter.next()) {
1996      if (iter.type() == relocInfo::metadata_type ) {
1997        metadata_Relocation* r = iter.metadata_reloc();
1998        // In this loop, we must only follow metadata directly embedded in
1999        // the code.  Other metadata (index > 0) is seen as part of
2000        // the metadata section below.
2001        assert(1 == (r->metadata_is_immediate()) +
2002               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2003               "metadata must be found in exactly one place");
2004        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
2005          Metadata* md = r->metadata_value();
2006          if (md != _method) f(md);
2007        }
2008      } else if (iter.type() == relocInfo::virtual_call_type) {
2009        // Check compiledIC holders associated with this nmethod
2010        CompiledIC *ic = CompiledIC_at(&iter);
2011        if (ic->is_icholder_call()) {
2012          CompiledICHolder* cichk = ic->cached_icholder();
2013          f(cichk->holder_method());
2014          f(cichk->holder_klass());
2015        } else {
2016          Metadata* ic_oop = ic->cached_metadata();
2017          if (ic_oop != NULL) {
2018            f(ic_oop);
2019          }
2020        }
2021      }
2022    }
2023  }
2024
2025  // Visit the metadata section
2026  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2027    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-metadata words
2028    Metadata* md = *p;
2029    f(md);
2030  }
2031
2032  // Visit metadata not embedded in the other places.
2033  if (_method != NULL) f(_method);
2034}
2035
2036void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
2037  // Make sure the oops are ready to receive visitors
2038  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2039  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2040
2041  // If the method is not entrant or zombie then a JMP is plastered over the
2042  // first few bytes.  If an oop in the old code was there, that oop
2043  // should not get GC'd.  Skip the first few bytes of oops on
2044  // not-entrant methods.
2045  address low_boundary = verified_entry_point();
2046  if (is_not_entrant()) {
2047    low_boundary += NativeJump::instruction_size;
2048    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2049    // (See comment above.)
2050  }
2051
2052  RelocIterator iter(this, low_boundary);
2053
2054  while (iter.next()) {
2055    if (iter.type() == relocInfo::oop_type ) {
2056      oop_Relocation* r = iter.oop_reloc();
2057      // In this loop, we must only follow those oops directly embedded in
2058      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
2059      assert(1 == (r->oop_is_immediate()) +
2060                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2061             "oop must be found in exactly one place");
2062      if (r->oop_is_immediate() && r->oop_value() != NULL) {
2063        f->do_oop(r->oop_addr());
2064      }
2065    }
2066  }
2067
2068  // Scopes
2069  // This includes oop constants not inlined in the code stream.
2070  for (oop* p = oops_begin(); p < oops_end(); p++) {
2071    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2072    f->do_oop(p);
2073  }
2074}
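
// Editor's sketch (hypothetical closure, not VM code): oops_do drives any
// OopClosure over both the immediate oops in the instruction stream and the
// oops section, mirroring the VerifyOopsClosure pattern used later in this
// file.
#if 0
class CountOopsClosure: public OopClosure {
 public:
  int _count;
  CountOopsClosure() : _count(0) { }
  virtual void do_oop(oop* p)       { if (*p != NULL) _count++; }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

static int example_count_oops(nmethod* nm) {
  CountOopsClosure closure;
  nm->oops_do(&closure);
  return closure._count;
}
#endif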
2075
2076#define NMETHOD_SENTINEL ((nmethod*)badAddress)
2077
2078nmethod* volatile nmethod::_oops_do_mark_nmethods;
2079
2080// An nmethod is "marked" if its _mark_link is set non-null.
2081// Even if it is the end of the linked list, it will have a non-null link value,
2082// as long as it is on the list.
2083// This code must be MP safe, because it is used from parallel GC passes.
2084bool nmethod::test_set_oops_do_mark() {
2085  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
2086  nmethod* observed_mark_link = _oops_do_mark_link;
2087  if (observed_mark_link == NULL) {
2088    // Claim this nmethod for this thread to mark.
2089    observed_mark_link = (nmethod*)
2090      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
2091    if (observed_mark_link == NULL) {
2092
2093      // Atomically append this nmethod (now claimed) to the head of the list:
2094      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
2095      for (;;) {
2096        nmethod* required_mark_nmethods = observed_mark_nmethods;
2097        _oops_do_mark_link = required_mark_nmethods;
2098        observed_mark_nmethods = (nmethod*)
2099          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
2100        if (observed_mark_nmethods == required_mark_nmethods)
2101          break;
2102      }
2103      // Mark was clear when we first saw this nmethod.
2104      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
2105      return false;
2106    }
2107  }
2108  // On fall through, another racing thread marked this nmethod before we did.
2109  return true;
2110}
2111
2112void nmethod::oops_do_marking_prologue() {
2113  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
2114  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
2115  // We use cmpxchg_ptr instead of regular assignment here because GC worker
2116  // threads may be forked afterwards, and they all need to see the same state.
2117  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
2118  guarantee(observed == NULL, "no races in this sequential code");
2119}
2120
2121void nmethod::oops_do_marking_epilogue() {
2122  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
2123  nmethod* cur = _oops_do_mark_nmethods;
2124  while (cur != NMETHOD_SENTINEL) {
2125    assert(cur != NULL, "not NULL-terminated");
2126    nmethod* next = cur->_oops_do_mark_link;
2127    cur->_oops_do_mark_link = NULL;
2128    cur->verify_oop_relocations();
2129    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
2130    cur = next;
2131  }
2132  void* required = _oops_do_mark_nmethods;
2133  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
2134  guarantee(observed == required, "no races in this sequential code");
2135  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
2136}
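
// Editor's sketch (hypothetical driver): the prologue/epilogue pair brackets
// a GC's code cache walk, and test_set_oops_do_mark() makes the per-nmethod
// visit idempotent even with several competing GC threads.
#if 0
static void example_marking_cycle(nmethod* nm, OopClosure* f) {
  nmethod::oops_do_marking_prologue();
  // Possibly executed by many threads for many nmethods:
  if (!nm->test_set_oops_do_mark()) {   // true means another thread got it first
    nm->oops_do(f);                     // so each nmethod is visited exactly once
  }
  nmethod::oops_do_marking_epilogue();  // unlink and verify all claimed nmethods
}
#endif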
2137
2138class DetectScavengeRoot: public OopClosure {
2139  bool     _detected_scavenge_root;
2140public:
2141  DetectScavengeRoot() : _detected_scavenge_root(false)
2142  { NOT_PRODUCT(_print_nm = NULL); }
2143  bool detected_scavenge_root() { return _detected_scavenge_root; }
2144  virtual void do_oop(oop* p) {
2145    if ((*p) != NULL && (*p)->is_scavengable()) {
2146      NOT_PRODUCT(maybe_print(p));
2147      _detected_scavenge_root = true;
2148    }
2149  }
2150  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2151
2152#ifndef PRODUCT
2153  nmethod* _print_nm;
2154  void maybe_print(oop* p) {
2155    if (_print_nm == NULL)  return;
2156    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
2157    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
2158                  _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
2159                  (void *)(*p), (intptr_t)p);
2160    (*p)->print();
2161  }
2162#endif //PRODUCT
2163};
2164
2165bool nmethod::detect_scavenge_root_oops() {
2166  DetectScavengeRoot detect_scavenge_root;
2167  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
2168  oops_do(&detect_scavenge_root);
2169  return detect_scavenge_root.detected_scavenge_root();
2170}
2171
2172// Method that knows how to preserve outgoing arguments at call. This method must be
2173// called with a frame corresponding to a Java invoke
2174void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
2175#ifndef SHARK
2176  if (!method()->is_native()) {
2177    SimpleScopeDesc ssd(this, fr.pc());
2178    Bytecode_invoke call(ssd.method(), ssd.bci());
2179    bool has_receiver = call.has_receiver();
2180    bool has_appendix = call.has_appendix();
2181    Symbol* signature = call.signature();
2182    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
2183  }
2184#endif // !SHARK
2185}
2186
2187
2188oop nmethod::embeddedOop_at(u_char* p) {
2189  RelocIterator iter(this, p, p + 1);
2190  while (iter.next())
2191    if (iter.type() == relocInfo::oop_type) {
2192      return iter.oop_reloc()->oop_value();
2193    }
2194  return NULL;
2195}
2196
2197
2198inline bool includes(void* p, void* from, void* to) {
2199  return from <= p && p < to;
2200}
2201
2202
2203void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2204  assert(count >= 2, "must be sentinel values, at least");
2205
2206#ifdef ASSERT
2207  // must be sorted and unique; we do a binary search in find_pc_desc()
2208  int prev_offset = pcs[0].pc_offset();
2209  assert(prev_offset == PcDesc::lower_offset_limit,
2210         "must start with a sentinel");
2211  for (int i = 1; i < count; i++) {
2212    int this_offset = pcs[i].pc_offset();
2213    assert(this_offset > prev_offset, "offsets must be sorted");
2214    prev_offset = this_offset;
2215  }
2216  assert(prev_offset == PcDesc::upper_offset_limit,
2217         "must end with a sentinel");
2218#endif //ASSERT
2219
2220  // Search for MethodHandle invokes and tag the nmethod.
2221  for (int i = 0; i < count; i++) {
2222    if (pcs[i].is_method_handle_invoke()) {
2223      set_has_method_handle_invokes(true);
2224      break;
2225    }
2226  }
2227  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
2228
2229  int size = count * sizeof(PcDesc);
2230  assert(scopes_pcs_size() >= size, "oob");
2231  memcpy(scopes_pcs_begin(), pcs, size);
2232
2233  // Adjust the final sentinel downward.
2234  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2235  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2236  last_pc->set_pc_offset(content_size() + 1);
2237  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2238    // Fill any rounding gaps with copies of the last record.
2239    last_pc[1] = last_pc[0];
2240  }
2241  // The following assert could fail if sizeof(PcDesc) is not
2242  // an integral multiple of oopSize (the rounding term).
2243  // If it fails, change the logic to always allocate a multiple
2244  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2245  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2246}
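
// Editor's note (illustrative layout, count == 3 with one record of rounding
// gap): after copy_scopes_pcs() the PcDesc array looks like
//
//   [lower_offset_limit] [off_1] [content_size()+1] [copy of previous]
//
// so the search in find_pc_desc() always has sentinels on both sides and
// never inspects the rounding gap as real data.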
2247
2248void nmethod::copy_scopes_data(u_char* buffer, int size) {
2249  assert(scopes_data_size() >= size, "oob");
2250  memcpy(scopes_data_begin(), buffer, size);
2251}
2252
2253
2254#ifdef ASSERT
2255static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
2256  PcDesc* lower = nm->scopes_pcs_begin();
2257  PcDesc* upper = nm->scopes_pcs_end();
2258  lower += 1; // exclude initial sentinel
2259  PcDesc* res = NULL;
2260  for (PcDesc* p = lower; p < upper; p++) {
2261    NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2262    if (match_desc(p, pc_offset, approximate)) {
2263      if (res == NULL)
2264        res = p;
2265      else
2266        res = (PcDesc*) badAddress;
2267    }
2268  }
2269  return res;
2270}
2271#endif
2272
2273
2274// Finds a PcDesc with real-pc equal to "pc"
2275PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
2276  address base_address = code_begin();
2277  if ((pc < base_address) ||
2278      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2279    return NULL;  // PC is wildly out of range
2280  }
2281  int pc_offset = (int) (pc - base_address);
2282
2283  // Check whether the PcDesc cache contains the desired PcDesc
2284  // (this has an almost 100% hit rate).
2285  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2286  if (res != NULL) {
2287    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
2288    return res;
2289  }
2290
2291  // Fallback algorithm: quasi-linear search for the PcDesc
2292  // Find the last pc_offset less than the given offset.
2293  // The successor must be the required match, if there is a match at all.
2294  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2295  PcDesc* lower = scopes_pcs_begin();
2296  PcDesc* upper = scopes_pcs_end();
2297  upper -= 1; // exclude final sentinel
2298  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2299
2300#define assert_LU_OK \
2301  /* invariant on lower..upper during the following search: */ \
2302  assert(lower->pc_offset() <  pc_offset, "sanity"); \
2303  assert(upper->pc_offset() >= pc_offset, "sanity")
2304  assert_LU_OK;
2305
2306  // Use the last successful return as a split point.
2307  PcDesc* mid = _pc_desc_cache.last_pc_desc();
2308  NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2309  if (mid->pc_offset() < pc_offset) {
2310    lower = mid;
2311  } else {
2312    upper = mid;
2313  }
2314
2315  // Take giant steps at first (4096, then 256, then 16, then 1)
2316  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2317  const int RADIX = (1 << LOG2_RADIX);
2318  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2319    while ((mid = lower + step) < upper) {
2320      assert_LU_OK;
2321      NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2322      if (mid->pc_offset() < pc_offset) {
2323        lower = mid;
2324      } else {
2325        upper = mid;
2326        break;
2327      }
2328    }
2329    assert_LU_OK;
2330  }
2331
2332  // Sneak up on the value with a linear search of length ~16.
2333  while (true) {
2334    assert_LU_OK;
2335    mid = lower + 1;
2336    NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
2337    if (mid->pc_offset() < pc_offset) {
2338      lower = mid;
2339    } else {
2340      upper = mid;
2341      break;
2342    }
2343  }
2344#undef assert_LU_OK
2345
2346  if (match_desc(upper, pc_offset, approximate)) {
2347    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
2348    _pc_desc_cache.add_pc_desc(upper);
2349    return upper;
2350  } else {
2351    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2352    return NULL;
2353  }
2354}
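
// Editor's sketch (self-contained, assumption: a plain sorted int array
// stands in for the PcDesc table): the same radix-stepped search as above,
// with the product-build step schedule 4096 -> 256 -> 16 -> 1.
#if 0
static int radix_search(const int* a, int n, int key) {
  // Caller guarantees sentinels: a[0] < key and a[n-1] >= key, n >= 2.
  int lower = 0;
  int upper = n - 1;
  const int LOG2_RADIX = 4;
  for (int step = (1 << (LOG2_RADIX * 3)); step > 1; step >>= LOG2_RADIX) {
    int mid;
    while ((mid = lower + step) < upper) {
      if (a[mid] < key) {
        lower = mid;      // still below the key: keep striding
      } else {
        upper = mid;      // overshot: shrink the window and refine the step
        break;
      }
    }
  }
  while (true) {
    // Linear sneak-up over the final stretch of at most RADIX elements.
    int mid = lower + 1;
    if (a[mid] < key) {
      lower = mid;
    } else {
      return mid;         // index of the first element >= key
    }
  }
}
#endif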
2355
2356
2357void nmethod::check_all_dependencies(DepChange& changes) {
2358  // Checked dependencies are allocated into this ResourceMark
2359  ResourceMark rm;
2360
2361  // Turn off dependency tracing while actually testing dependencies.
2362  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2363
2364  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2365                            &DependencySignature::equals, 11027> DepTable;
2366
2367  DepTable* table = new DepTable();
2368
2369  // Iterate over live nmethods and check dependencies of all nmethods that are not
2370  // marked for deoptimization. A particular dependency is only checked once.
2371  NMethodIterator iter;
2372  while (iter.next()) {
2373    nmethod* nm = iter.method();
2374    // Only notify for live nmethods
2375    if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2376      for (Dependencies::DepStream deps(nm); deps.next(); ) {
2377        // Construct abstraction of a dependency.
2378        DependencySignature* current_sig = new DependencySignature(deps);
2379
2380        // Determine if dependency is already checked. table->put(...) returns
2381        // 'true' if the dependency is added (i.e., was not in the hashtable).
2382        if (table->put(*current_sig, 1)) {
2383          if (deps.check_dependency() != NULL) {
2384            // Dependency checking failed. Print out information about the failed
2385            // dependency and finally fail with an assert. We can fail here, since
2386            // dependency checking is never done in a product build.
2387            changes.print();
2388            nm->print();
2389            nm->print_dependencies();
2390            assert(false, "Should have been marked for deoptimization");
2391          }
2392        }
2393      }
2394    }
2395  }
2396}
2397
2398bool nmethod::check_dependency_on(DepChange& changes) {
2399  // What has happened:
2400  // 1) a new class dependee has been added
2401  // 2) dependee and all its super classes have been marked
2402  bool found_check = false;  // set to true if a failing dependency is found
2403  for (Dependencies::DepStream deps(this); deps.next(); ) {
2404    // Evaluate only relevant dependencies.
2405    if (deps.spot_check_dependency_at(changes) != NULL) {
2406      found_check = true;
2407      NOT_DEBUG(break);
2408    }
2409  }
2410  return found_check;
2411}
2412
2413bool nmethod::is_evol_dependent_on(Klass* dependee) {
2414  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2415  Array<Method*>* dependee_methods = dependee_ik->methods();
2416  for (Dependencies::DepStream deps(this); deps.next(); ) {
2417    if (deps.type() == Dependencies::evol_method) {
2418      Method* method = deps.method_argument(0);
2419      for (int j = 0; j < dependee_methods->length(); j++) {
2420        if (dependee_methods->at(j) == method) {
2421          // RC_TRACE macro has an embedded ResourceMark
2422          RC_TRACE(0x01000000,
2423            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2424            _method->method_holder()->external_name(),
2425            _method->name()->as_C_string(),
2426            _method->signature()->as_C_string(), compile_id(),
2427            method->method_holder()->external_name(),
2428            method->name()->as_C_string(),
2429            method->signature()->as_C_string()));
2430          if (TraceDependencies || LogCompilation)
2431            deps.log_dependency(dependee);
2432          return true;
2433        }
2434      }
2435    }
2436  }
2437  return false;
2438}
2439
2440// Called from mark_for_deoptimization, when dependee is invalidated.
2441bool nmethod::is_dependent_on_method(Method* dependee) {
2442  for (Dependencies::DepStream deps(this); deps.next(); ) {
2443    if (deps.type() != Dependencies::evol_method)
2444      continue;
2445    Method* method = deps.method_argument(0);
2446    if (method == dependee) return true;
2447  }
2448  return false;
2449}
2450
2451
2452bool nmethod::is_patchable_at(address instr_addr) {
2453  assert(insts_contains(instr_addr), "wrong nmethod used");
2454  if (is_zombie()) {
2455    // a zombie may never be patched
2456    return false;
2457  }
2458  return true;
2459}
2460
2461
2462address nmethod::continuation_for_implicit_exception(address pc) {
2463  // Exception happened outside inline-cache check code => we are inside
2464  // an active nmethod => use cpc to determine a return address
2465  int exception_offset = pc - code_begin();
2466  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
2467#ifdef ASSERT
2468  if (cont_offset == 0) {
2469    Thread* thread = ThreadLocalStorage::get_thread_slow();
2470    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
2471    HandleMark hm(thread);
2472    ResourceMark rm(thread);
2473    CodeBlob* cb = CodeCache::find_blob(pc);
2474    assert(cb != NULL && cb == this, "");
2475    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, pc);
2476    print();
2477    method()->print_codes();
2478    print_code();
2479    print_pcs();
2480  }
2481#endif
2482  if (cont_offset == 0) {
2483    // Let the normal error handling report the exception
2484    return NULL;
2485  }
2486  return code_begin() + cont_offset;
2487}
2488
2489
2490
2491void nmethod_init() {
2492  // make sure you didn't forget to adjust the filler fields
2493  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2494}
2495
2496
2497//-------------------------------------------------------------------------------------------
2498
2499
2500// QQQ might we make this work from a frame??
2501nmethodLocker::nmethodLocker(address pc) {
2502  CodeBlob* cb = CodeCache::find_blob(pc);
2503  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for an nmethod found");
2504  _nm = (nmethod*)cb;
2505  lock_nmethod(_nm);
2506}
2507
2508// Only JvmtiDeferredEvent::compiled_method_unload_event()
2509// should pass zombie_ok == true.
2510void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2511  if (nm == NULL)  return;
2512  Atomic::inc(&nm->_lock_count);
2513  assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2514}
2515
2516void nmethodLocker::unlock_nmethod(nmethod* nm) {
2517  if (nm == NULL)  return;
2518  Atomic::dec(&nm->_lock_count);
2519  assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2520}
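
// Editor's sketch (hypothetical usage): nmethodLocker pins an nmethod
// RAII-style so the sweeper cannot convert it to a zombie and flush it
// while a thread is still inspecting it.
#if 0
static void example_pin_nmethod(address pc_in_nmethod) {
  nmethodLocker nml(pc_in_nmethod);  // locates and locks the enclosing nmethod
  // ... inspect the nmethod safely here ...
}                                    // destructor drops the lock again
#endif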
2521
2522
2523// -----------------------------------------------------------------------------
2524// nmethod::get_deopt_original_pc
2525//
2526// Return the original PC for the given PC if:
2527// (a) the given PC belongs to an nmethod and
2528// (b) it is a deopt PC
2529address nmethod::get_deopt_original_pc(const frame* fr) {
2530  if (fr->cb() == NULL)  return NULL;
2531
2532  nmethod* nm = fr->cb()->as_nmethod_or_null();
2533  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
2534    return nm->get_original_pc(fr);
2535
2536  return NULL;
2537}
2538
2539
2540// -----------------------------------------------------------------------------
2541// MethodHandle
2542
2543bool nmethod::is_method_handle_return(address return_pc) {
2544  if (!has_method_handle_invokes())  return false;
2545  PcDesc* pd = pc_desc_at(return_pc);
2546  if (pd == NULL)
2547    return false;
2548  return pd->is_method_handle_invoke();
2549}
2550
2551
2552// -----------------------------------------------------------------------------
2553// Verification
2554
2555class VerifyOopsClosure: public OopClosure {
2556  nmethod* _nm;
2557  bool     _ok;
2558public:
2559  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2560  bool ok() { return _ok; }
2561  virtual void do_oop(oop* p) {
2562    if ((*p) == NULL || (*p)->is_oop())  return;
2563    if (_ok) {
2564      _nm->print_nmethod(true);
2565      _ok = false;
2566    }
2567    tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
2568                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2569  }
2570  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2571};
2572
2573void nmethod::verify() {
2574
2575  // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant,
2576  // which seems odd.
2577
2578  if (is_zombie() || is_not_entrant())
2579    return;
2580
2581  // Make sure all the entry points are correctly aligned for patching.
2582  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2583
2584  // assert(method()->is_oop(), "must be valid");
2585
2586  ResourceMark rm;
2587
2588  if (!CodeCache::contains(this)) {
2589    fatal(err_msg("nmethod at " INTPTR_FORMAT " not in zone", this));
2590  }
2591
2592  if (is_native_method())
2593    return;
2594
2595  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2596  if (nm != this) {
2597    fatal(err_msg("findNMethod did not find this nmethod (" INTPTR_FORMAT ")",
2598                  this));
2599  }
2600
2601  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2602    if (! p->verify(this)) {
2603      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", this);
2604    }
2605  }
2606
2607  VerifyOopsClosure voc(this);
2608  oops_do(&voc);
2609  assert(voc.ok(), "embedded oops must be OK");
2610  verify_scavenge_root_oops();
2611
2612  verify_scopes();
2613}
2614
2615
2616void nmethod::verify_interrupt_point(address call_site) {
2617  // Verify IC only when nmethod installation is finished.
2618  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
2619                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
2620  if (is_installed) {
2621    Thread *cur = Thread::current();
2622    if (CompiledIC_lock->owner() == cur ||
2623        ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2624         SafepointSynchronize::is_at_safepoint())) {
2625      CompiledIC_at(this, call_site);
2626      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2627    } else {
2628      MutexLocker ml_verify (CompiledIC_lock);
2629      CompiledIC_at(this, call_site);
2630    }
2631  }
2632
2633  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2634  assert(pd != NULL, "PcDesc must exist");
2635  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2636                                     pd->obj_decode_offset(), pd->should_reexecute(),
2637                                     pd->return_oop());
2638       !sd->is_top(); sd = sd->sender()) {
2639    sd->verify();
2640  }
2641}
2642
2643void nmethod::verify_scopes() {
2644  if (!method()) return;        // Runtime stubs have no scope
2645  if (method()->is_native()) return; // Ignore stub methods.
2646  // Iterate through all interrupt points
2647  // and verify that the debug information is valid.
2648  RelocIterator iter((nmethod*)this);
2649  while (iter.next()) {
2650    address stub = NULL;
2651    switch (iter.type()) {
2652      case relocInfo::virtual_call_type:
2653        verify_interrupt_point(iter.addr());
2654        break;
2655      case relocInfo::opt_virtual_call_type:
2656        stub = iter.opt_virtual_call_reloc()->static_stub();
2657        verify_interrupt_point(iter.addr());
2658        break;
2659      case relocInfo::static_call_type:
2660        stub = iter.static_call_reloc()->static_stub();
2661        //verify_interrupt_point(iter.addr());
2662        break;
2663      case relocInfo::runtime_call_type:
2664        address destination = iter.reloc()->value();
2665        // Right now there is no way to find out which entries support
2666        // an interrupt point.  It would be nice if we had this
2667        // information in a table.
2668        break;
2669    }
2670    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2671  }
2672}
2673
2674
2675// -----------------------------------------------------------------------------
2676// Non-product code
2677#ifndef PRODUCT
2678
2679class DebugScavengeRoot: public OopClosure {
2680  nmethod* _nm;
2681  bool     _ok;
2682public:
2683  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
2684  bool ok() { return _ok; }
2685  virtual void do_oop(oop* p) {
2686    if ((*p) == NULL || !(*p)->is_scavengable())  return;
2687    if (_ok) {
2688      _nm->print_nmethod(true);
2689      _ok = false;
2690    }
2691    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
2692                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
2693    (*p)->print();
2694  }
2695  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2696};
2697
2698void nmethod::verify_scavenge_root_oops() {
2699  if (UseG1GC) {
2700    return;
2701  }
2702
2703  if (!on_scavenge_root_list()) {
2704    // Actually look inside, to verify the claim that it's clean.
2705    DebugScavengeRoot debug_scavenge_root(this);
2706    oops_do(&debug_scavenge_root);
2707    if (!debug_scavenge_root.ok())
2708      fatal("found an unadvertised bad scavengable oop in the code cache");
2709  }
2710  assert(scavenge_root_not_marked(), "");
2711}
2712
2713#endif // PRODUCT
2714
2715// Printing operations
2716
2717void nmethod::print() const {
2718  ResourceMark rm;
2719  ttyLocker ttyl;   // keep the following output all in one block
2720
2721  tty->print("Compiled method ");
2722
2723  if (is_compiled_by_c1()) {
2724    tty->print("(c1) ");
2725  } else if (is_compiled_by_c2()) {
2726    tty->print("(c2) ");
2727  } else if (is_compiled_by_shark()) {
2728    tty->print("(shark) ");
2729  } else {
2730    tty->print("(nm) ");
2731  }
2732
2733  print_on(tty, NULL);
2734
2735  if (WizardMode) {
2736    tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
2737    tty->print(" for method " INTPTR_FORMAT , (address)method());
2738    tty->print(" { ");
2739    if (is_in_use())      tty->print("in_use ");
2740    if (is_not_entrant()) tty->print("not_entrant ");
2741    if (is_zombie())      tty->print("zombie ");
2742    if (is_unloaded())    tty->print("unloaded ");
2743    if (on_scavenge_root_list())  tty->print("scavenge_root ");
2744    tty->print_cr("}:");
2745  }
2746  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2747                                              (address)this,
2748                                              (address)this + size(),
2749                                              size());
2750  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2751                                              relocation_begin(),
2752                                              relocation_end(),
2753                                              relocation_size());
2754  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2755                                              consts_begin(),
2756                                              consts_end(),
2757                                              consts_size());
2758  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2759                                              insts_begin(),
2760                                              insts_end(),
2761                                              insts_size());
2762  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2763                                              stub_begin(),
2764                                              stub_end(),
2765                                              stub_size());
2766  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2767                                              oops_begin(),
2768                                              oops_end(),
2769                                              oops_size());
2770  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2771                                              metadata_begin(),
2772                                              metadata_end(),
2773                                              metadata_size());
2774  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2775                                              scopes_data_begin(),
2776                                              scopes_data_end(),
2777                                              scopes_data_size());
2778  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2779                                              scopes_pcs_begin(),
2780                                              scopes_pcs_end(),
2781                                              scopes_pcs_size());
2782  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2783                                              dependencies_begin(),
2784                                              dependencies_end(),
2785                                              dependencies_size());
2786  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2787                                              handler_table_begin(),
2788                                              handler_table_end(),
2789                                              handler_table_size());
2790  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2791                                              nul_chk_table_begin(),
2792                                              nul_chk_table_end(),
2793                                              nul_chk_table_size());
2794}
2795
2796void nmethod::print_code() {
2797  HandleMark hm;
2798  ResourceMark m;
2799  Disassembler::decode(this);
2800}
2801
2802
2803#ifndef PRODUCT
2804
2805void nmethod::print_scopes() {
2806  // Find the first pc desc for all scopes in the code and print it.
2807  ResourceMark rm;
2808  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2809    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
2810      continue;
2811
2812    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
2813    sd->print_on(tty, p);
2814  }
2815}
2816
2817void nmethod::print_dependencies() {
2818  ResourceMark rm;
2819  ttyLocker ttyl;   // keep the following output all in one block
2820  tty->print_cr("Dependencies:");
2821  for (Dependencies::DepStream deps(this); deps.next(); ) {
2822    deps.print_dependency();
2823    Klass* ctxk = deps.context_type();
2824    if (ctxk != NULL) {
2825      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
2826        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
2827      }
2828    }
2829    deps.log_dependency();  // put it into the xml log also
2830  }
2831}
2832
2833
2834void nmethod::print_relocations() {
2835  ResourceMark m;       // in case methods get printed via the debugger
2836  tty->print_cr("relocations:");
2837  RelocIterator iter(this);
2838  iter.print();
2839  if (UseRelocIndex) {
2840    jint* index_end   = (jint*)relocation_end() - 1;
2841    jint  index_size  = *index_end;
2842    jint* index_start = (jint*)( (address)index_end - index_size );
2843    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", index_start, index_size);
2844    if (index_size > 0) {
2845      jint* ip;
2846      for (ip = index_start; ip+2 <= index_end; ip += 2)
2847        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
2848                      ip[0],
2849                      ip[1],
2850                      header_end()+ip[0],
2851                      relocation_begin()-1+ip[1]);
2852      for (; ip < index_end; ip++)
2853        tty->print_cr("  (%d ?)", ip[0]);
2854      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
2855      ip++;
2856      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
2857    }
2858  }
2859}
2860
2861
2862void nmethod::print_pcs() {
2863  ResourceMark m;       // in case methods get printed via debugger
2864  tty->print_cr("pc-bytecode offsets:");
2865  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2866    p->print(this);
2867  }
2868}
2869
2870#endif // PRODUCT
2871
const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
  RelocIterator iter(this, begin, end);
  bool have_one = false;
  while (iter.next()) {
    have_one = true;
    switch (iter.type()) {
        case relocInfo::none:                  return "no_reloc";
        case relocInfo::oop_type: {
          stringStream st;
          oop_Relocation* r = iter.oop_reloc();
          oop obj = r->oop_value();
          st.print("oop(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::metadata_type: {
          stringStream st;
          metadata_Relocation* r = iter.metadata_reloc();
          Metadata* obj = r->metadata_value();
          st.print("metadata(");
          if (obj == NULL) st.print("NULL");
          else obj->print_value_on(&st);
          st.print(")");
          return st.as_string();
        }
        case relocInfo::virtual_call_type:     return "virtual_call";
        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
        case relocInfo::static_call_type:      return "static_call";
        case relocInfo::static_stub_type:      return "static_stub";
        case relocInfo::runtime_call_type:     return "runtime_call";
        case relocInfo::external_word_type:    return "external_word";
        case relocInfo::internal_word_type:    return "internal_word";
        case relocInfo::section_word_type:     return "section_word";
        case relocInfo::poll_type:             return "poll";
        case relocInfo::poll_return_type:      return "poll_return";
        case relocInfo::type_mask:             return "type_bit_mask";
    }
  }
  return have_one ? "other" : NULL;
}

// Return the last scope in (begin..end]; probing at begin+1 keeps the
// interval open at begin itself.
ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
  PcDesc* p = pc_desc_near(begin+1);
  if (p != NULL && p->real_pc(this) <= end) {
    return new ScopeDesc(this, p->scope_decode_offset(),
                         p->obj_decode_offset(), p->should_reexecute(),
                         p->return_oop());
  }
  return NULL;
}

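// Called by the disassembler to label section starts ([Entry Point],
// [Verified Entry Point], [Stub Code], ...). At the entry point it also
// prints the incoming Java calling convention: one line per argument with
// its register or caller-frame stack slot, e.g. (shape only; the register
// names here are hypothetical and platform-dependent):
//
//   # this:     reg0      = SomeKlass
//   # parm0:    reg1      = int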
void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
  if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");

  if (has_method_handle_invokes())
    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");

  if (block_begin == consts_begin())            stream->print_cr("[Constants]");

  if (block_begin == entry_point()) {
    methodHandle m = method();
    if (m.not_null()) {
      stream->print("  # ");
      m->print_value_on(stream);
      stream->cr();
    }
    if (m.not_null() && !is_osr_method()) {
      ResourceMark rm;
      int sizeargs = m->size_of_parameters();
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
      {
        int sig_index = 0;
        if (!m->is_static())
          sig_bt[sig_index++] = T_OBJECT; // 'this'
        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
          BasicType t = ss.type();
          sig_bt[sig_index++] = t;
          if (type2size[t] == 2) {
            sig_bt[sig_index++] = T_VOID;
          } else {
            assert(type2size[t] == 1, "size is 1 or 2");
          }
        }
        assert(sig_index == sizeargs, "");
      }
      const char* spname = "sp"; // make arch-specific?
      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
      int stack_slot_offset = this->frame_size() * wordSize;
      int tab1 = 14, tab2 = 24;
      int sig_index = 0;
      int arg_index = (m->is_static() ? 0 : -1);
      bool did_old_sp = false;
      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
        bool at_this = (arg_index == -1);
        bool at_old_sp = false;
        BasicType t = (at_this ? T_OBJECT : ss.type());
        assert(t == sig_bt[sig_index], "sigs in sync");
        if (at_this)
          stream->print("  # this: ");
        else
          stream->print("  # parm%d: ", arg_index);
        stream->move_to(tab1);
        VMReg fst = regs[sig_index].first();
        VMReg snd = regs[sig_index].second();
        if (fst->is_reg()) {
          stream->print("%s", fst->name());
          if (snd->is_valid())  {
            stream->print(":%s", snd->name());
          }
        } else if (fst->is_stack()) {
          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
          if (offset == stack_slot_offset)  at_old_sp = true;
          stream->print("[%s+0x%x]", spname, offset);
        } else {
          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
        }
        stream->print(" ");
        stream->move_to(tab2);
        stream->print("= ");
        if (at_this) {
          m->method_holder()->print_value_on(stream);
        } else {
          bool did_name = false;
          if (!at_this && ss.is_object()) {
            Symbol* name = ss.as_symbol_or_null();
            if (name != NULL) {
              name->print_value_on(stream);
              did_name = true;
            }
          }
          if (!did_name)
            stream->print("%s", type2name(t));
        }
        if (at_old_sp) {
          stream->print("  (%s of caller)", spname);
          did_old_sp = true;
        }
        stream->cr();
        sig_index += type2size[t];
        arg_index += 1;
        if (!at_this)  ss.next();
      }
      if (!did_old_sp) {
        stream->print("  # ");
        stream->move_to(tab1);
        stream->print("[%s+0x%x]", spname, stack_slot_offset);
        stream->print("  (%s of caller)", spname);
        stream->cr();
      }
    }
  }
}

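// Annotate one disassembled instruction range [begin, end) with everything
// recorded for it: the oop map, the debug scope chain (with bci and line
// numbers), relocations, and any implicit-exception continuation offset.
// Oop maps and scopes are matched against (begin, end] so that info tied to
// the byte after a call prints with the call instruction itself.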
void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
  // First, find an oopmap in (begin, end].
  // We use the odd half-closed interval so that oop maps and scope descs
  // which are tied to the byte after a call are printed with the call itself.
  address base = code_begin();
  OopMapSet* oms = oop_maps();
  if (oms != NULL) {
    for (int i = 0, imax = oms->size(); i < imax; i++) {
      OopMap* om = oms->at(i);
      address pc = base + om->offset();
      if (pc > begin) {
        if (pc <= end) {
          st->move_to(column);
          st->print("; ");
          om->print_on(st);
        }
        break;
      }
    }
  }

  // Print any debug info present at this pc.
  ScopeDesc* sd  = scope_desc_in(begin, end);
  if (sd != NULL) {
    st->move_to(column);
    if (sd->bci() == SynchronizationEntryBCI) {
      st->print(";*synchronization entry");
    } else {
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else if (sd->method()->is_native()) {
        st->print("method is native");
      } else {
        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
        st->print(";*%s", Bytecodes::name(bc));
        switch (bc) {
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic:
        case Bytecodes::_invokeinterface:
          {
            Bytecode_invoke invoke(sd->method(), sd->bci());
            st->print(" ");
            if (invoke.name() != NULL)
              invoke.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
            break;
          }
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
        case Bytecodes::_getstatic:
        case Bytecodes::_putstatic:
          {
            Bytecode_field field(sd->method(), sd->bci());
            st->print(" ");
            if (field.name() != NULL)
              field.name()->print_symbol_on(st);
            else
              st->print("<UNKNOWN>");
          }
        }
      }
    }

    // Print all scopes
    for (;sd != NULL; sd = sd->sender()) {
      st->move_to(column);
      st->print("; -");
      if (sd->method() == NULL) {
        st->print("method is NULL");
      } else {
        // Only consult the method for its name and line number when it is
        // non-NULL; the previous layout dereferenced a NULL method here.
        sd->method()->print_short_name(st);
        int lineno = sd->method()->line_number_from_bci(sd->bci());
        if (lineno != -1) {
          st->print("@%d (line %d)", sd->bci(), lineno);
        } else {
          st->print("@%d", sd->bci());
        }
      }
      st->cr();
    }
  }

  // Print relocation information
  const char* str = reloc_string_for(begin, end);
  if (str != NULL) {
    if (sd != NULL) st->cr();
    st->move_to(column);
    st->print(";   {%s}", str);
  }
  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
  if (cont_offset != 0) {
    st->move_to(column);
    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset);
  }

}

#ifndef PRODUCT

void nmethod::print_value_on(outputStream* st) const {
  st->print("nmethod");
  print_on(st, NULL);
}

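// Walk the relocations and print every call site: inline caches (under
// CompiledIC_lock) and static calls.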
void nmethod::print_calls(outputStream* st) {
  RelocIterator iter(this);
  while (iter.next()) {
    switch (iter.type()) {
    case relocInfo::virtual_call_type:
    case relocInfo::opt_virtual_call_type: {
      VerifyMutexLocker mc(CompiledIC_lock);
      CompiledIC_at(&iter)->print();
      break;
    }
    case relocInfo::static_call_type:
      st->print_cr("Static call at " INTPTR_FORMAT, iter.reloc()->addr());
      compiledStaticCall_at(iter.reloc())->print();
      break;
    }
  }
}

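// The exception-handler and implicit-null-check tables live in the nmethod
// body; wrap and print them.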
void nmethod::print_handler_table() {
  ExceptionHandlerTable(this).print();
}

void nmethod::print_nul_chk_table() {
  ImplicitExceptionTable(this).print(code_begin());
}

void nmethod::print_statistics() {
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
  nmethod_stats.print_native_nmethod_stats();
  nmethod_stats.print_nmethod_stats();
  DebugInformationRecorder::print_statistics();
  nmethod_stats.print_pc_stats();
  Dependencies::print_statistics();
  if (xtty != NULL)  xtty->tail("statistics");
}

#endif // PRODUCT
