nmethod.cpp revision 9149:a8a8604f890f
1/*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "code/codeCache.hpp"
27#include "code/compiledIC.hpp"
28#include "code/dependencies.hpp"
29#include "code/nativeInst.hpp"
30#include "code/nmethod.hpp"
31#include "code/scopeDesc.hpp"
32#include "compiler/abstractCompiler.hpp"
33#include "compiler/compileBroker.hpp"
34#include "compiler/compileLog.hpp"
35#include "compiler/compilerOracle.hpp"
36#include "compiler/disassembler.hpp"
37#include "interpreter/bytecode.hpp"
38#include "oops/methodData.hpp"
39#include "oops/oop.inline.hpp"
40#include "prims/jvmtiRedefineClassesTrace.hpp"
41#include "prims/jvmtiImpl.hpp"
42#include "runtime/atomic.inline.hpp"
43#include "runtime/orderAccess.inline.hpp"
44#include "runtime/sharedRuntime.hpp"
45#include "runtime/sweeper.hpp"
46#include "utilities/resourceHash.hpp"
47#include "utilities/dtrace.hpp"
48#include "utilities/events.hpp"
49#include "utilities/xmlstream.hpp"
50#ifdef TARGET_ARCH_x86
51# include "nativeInst_x86.hpp"
52#endif
53#ifdef TARGET_ARCH_sparc
54# include "nativeInst_sparc.hpp"
55#endif
56#ifdef TARGET_ARCH_zero
57# include "nativeInst_zero.hpp"
58#endif
59#ifdef TARGET_ARCH_arm
60# include "nativeInst_arm.hpp"
61#endif
62#ifdef TARGET_ARCH_ppc
63# include "nativeInst_ppc.hpp"
64#endif
65#ifdef SHARK
66#include "shark/sharkCompiler.hpp"
67#endif
68#if INCLUDE_JVMCI
69#include "jvmci/jvmciJavaClasses.hpp"
70#endif
71
72unsigned char nmethod::_global_unloading_clock = 0;
73
74#ifdef DTRACE_ENABLED
75
76// Only bother with this argument setup if dtrace is available
77
78#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
79  {                                                                       \
80    Method* m = (method);                                                 \
81    if (m != NULL) {                                                      \
82      Symbol* klass_name = m->klass_name();                               \
83      Symbol* name = m->name();                                           \
84      Symbol* signature = m->signature();                                 \
85      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
86        (char *) klass_name->bytes(), klass_name->utf8_length(),                   \
87        (char *) name->bytes(), name->utf8_length(),                               \
88        (char *) signature->bytes(), signature->utf8_length());                    \
89    }                                                                     \
90  }
91
92#else //  ndef DTRACE_ENABLED
93
94#define DTRACE_METHOD_UNLOAD_PROBE(method)
95
96#endif
97
98bool nmethod::is_compiled_by_c1() const {
99  if (compiler() == NULL) {
100    return false;
101  }
102  return compiler()->is_c1();
103}
104bool nmethod::is_compiled_by_jvmci() const {
105  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
106  if (is_native_method()) return false;
107  return compiler()->is_jvmci();
108}
109bool nmethod::is_compiled_by_c2() const {
110  if (compiler() == NULL) {
111    return false;
112  }
113  return compiler()->is_c2();
114}
115bool nmethod::is_compiled_by_shark() const {
116  if (compiler() == NULL) {
117    return false;
118  }
119  return compiler()->is_shark();
120}
121
122
123
124//---------------------------------------------------------------------------------
125// NMethod statistics
126// They are printed under various flags, including:
127//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
128// (In the latter two cases, these statistics, like the others, go only to the log.)
129
130#ifndef PRODUCT
131// These variables are put into one block to reduce relocations
132// and make it simpler to print from the debugger.
133struct java_nmethod_stats_struct {
134  int nmethod_count;
135  int total_size;
136  int relocation_size;
137  int consts_size;
138  int insts_size;
139  int stub_size;
140  int scopes_data_size;
141  int scopes_pcs_size;
142  int dependencies_size;
143  int handler_table_size;
144  int nul_chk_table_size;
145  int oops_size;
146  int metadata_size;
147
148  void note_nmethod(nmethod* nm) {
149    nmethod_count += 1;
150    total_size          += nm->size();
151    relocation_size     += nm->relocation_size();
152    consts_size         += nm->consts_size();
153    insts_size          += nm->insts_size();
154    stub_size           += nm->stub_size();
155    oops_size           += nm->oops_size();
156    metadata_size       += nm->metadata_size();
157    scopes_data_size    += nm->scopes_data_size();
158    scopes_pcs_size     += nm->scopes_pcs_size();
159    dependencies_size   += nm->dependencies_size();
160    handler_table_size  += nm->handler_table_size();
161    nul_chk_table_size  += nm->nul_chk_table_size();
162  }
163  void print_nmethod_stats(const char* name) {
164    if (nmethod_count == 0)  return;
165    tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name);
166    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
167    if (nmethod_count != 0)       tty->print_cr(" header         = " SIZE_FORMAT, nmethod_count * sizeof(nmethod));
168    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
169    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
170    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
171    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
172    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
173    if (metadata_size != 0)       tty->print_cr(" metadata       = %d", metadata_size);
174    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
175    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
176    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
177    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
178    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
179  }
180};
181
182struct native_nmethod_stats_struct {
183  int native_nmethod_count;
184  int native_total_size;
185  int native_relocation_size;
186  int native_insts_size;
187  int native_oops_size;
188  int native_metadata_size;
189  void note_native_nmethod(nmethod* nm) {
190    native_nmethod_count += 1;
191    native_total_size       += nm->size();
192    native_relocation_size  += nm->relocation_size();
193    native_insts_size       += nm->insts_size();
194    native_oops_size        += nm->oops_size();
195    native_metadata_size    += nm->metadata_size();
196  }
197  void print_native_nmethod_stats() {
198    if (native_nmethod_count == 0)  return;
199    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
200    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
201    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
202    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
203    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
204    if (native_metadata_size != 0)    tty->print_cr(" N. metadata    = %d", native_metadata_size);
205  }
206};
207
208struct pc_nmethod_stats_struct {
209  int pc_desc_resets;   // number of resets (= number of caches)
210  int pc_desc_queries;  // queries to nmethod::find_pc_desc
211  int pc_desc_approx;   // number of queries made with approximate == true
212  int pc_desc_repeats;  // number of _pc_descs[0] hits
213  int pc_desc_hits;     // number of LRU cache hits
214  int pc_desc_tests;    // total number of PcDesc examinations
215  int pc_desc_searches; // total number of quasi-binary search steps
216  int pc_desc_adds;     // number of LRU cache insertions
217
218  void print_pc_stats() {
219    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
220                  pc_desc_queries,
221                  (double)(pc_desc_tests + pc_desc_searches)
222                  / pc_desc_queries);
223    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
224                  pc_desc_resets,
225                  pc_desc_queries, pc_desc_approx,
226                  pc_desc_repeats, pc_desc_hits,
227                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
228  }
229};
230
231#ifdef COMPILER1
232static java_nmethod_stats_struct c1_java_nmethod_stats;
233#endif
234#ifdef COMPILER2
235static java_nmethod_stats_struct c2_java_nmethod_stats;
236#endif
237#if INCLUDE_JVMCI
238static java_nmethod_stats_struct jvmci_java_nmethod_stats;
239#endif
240#ifdef SHARK
241static java_nmethod_stats_struct shark_java_nmethod_stats;
242#endif
243static java_nmethod_stats_struct unknown_java_nmethod_stats;
244
245static native_nmethod_stats_struct native_nmethod_stats;
246static pc_nmethod_stats_struct pc_nmethod_stats;
247
248static void note_java_nmethod(nmethod* nm) {
249#ifdef COMPILER1
250  if (nm->is_compiled_by_c1()) {
251    c1_java_nmethod_stats.note_nmethod(nm);
252  } else
253#endif
254#ifdef COMPILER2
255  if (nm->is_compiled_by_c2()) {
256    c2_java_nmethod_stats.note_nmethod(nm);
257  } else
258#endif
259#if INCLUDE_JVMCI
260  if (nm->is_compiled_by_jvmci()) {
261    jvmci_java_nmethod_stats.note_nmethod(nm);
262  } else
263#endif
264#ifdef SHARK
265  if (nm->is_compiled_by_shark()) {
266    shark_java_nmethod_stats.note_nmethod(nm);
267  } else
268#endif
269  {
270    unknown_java_nmethod_stats.note_nmethod(nm);
271  }
272}
273#endif // !PRODUCT
274
275//---------------------------------------------------------------------------------
276
277
278ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
279  assert(pc != NULL, "Must be non null");
280  assert(exception.not_null(), "Must be non null");
281  assert(handler != NULL, "Must be non null");
282
283  _count = 0;
284  _exception_type = exception->klass();
285  _next = NULL;
286
287  add_address_and_handler(pc,handler);
288}
289
290
291address ExceptionCache::match(Handle exception, address pc) {
292  assert(pc != NULL,"Must be non null");
293  assert(exception.not_null(),"Must be non null");
294  if (exception->klass() == exception_type()) {
295    return (test_address(pc));
296  }
297
298  return NULL;
299}
300
301
302bool ExceptionCache::match_exception_with_space(Handle exception) {
303  assert(exception.not_null(),"Must be non null");
304  if (exception->klass() == exception_type() && count() < cache_size) {
305    return true;
306  }
307  return false;
308}
309
310
311address ExceptionCache::test_address(address addr) {
312  for (int i=0; i<count(); i++) {
313    if (pc_at(i) == addr) {
314      return handler_at(i);
315    }
316  }
317  return NULL;
318}
319
320
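// Record (addr, handler) in this cache entry.  Returns false only when the entry
// is already full, in which case the caller links in a fresh ExceptionCache.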
321bool ExceptionCache::add_address_and_handler(address addr, address handler) {
322  if (test_address(addr) == handler) return true;
323  if (count() < cache_size) {
324    set_pc_at(count(),addr);
325    set_handler_at(count(), handler);
326    increment_count();
327    return true;
328  }
329  return false;
330}
331
332
333// Private methods for handling the exception cache.
334// These manipulate the cache entries directly and are used only from within
335// this class.
336ExceptionCache* nmethod::exception_cache_entry_for_exception(Handle exception) {
337  ExceptionCache* ec = exception_cache();
338  while (ec != NULL) {
339    if (ec->match_exception_with_space(exception)) {
340      return ec;
341    }
342    ec = ec->next();
343  }
344  return NULL;
345}
346
347
348//-----------------------------------------------------------------------------
349
350
351// Helper used by both find_pc_desc methods.
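// An approximate match succeeds when pc_offset falls in the half-open interval
// ((pc-1)->pc_offset(), pc->pc_offset()]; e.g. with neighboring PcDescs at
// offsets 8 and 24, an approximate query for offset 16 matches the one at 24.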
352static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
353  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
354  if (!approximate)
355    return pc->pc_offset() == pc_offset;
356  else
357    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
358}
359
360void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
361  if (initial_pc_desc == NULL) {
362    _pc_descs[0] = NULL; // native method; no PcDescs at all
363    return;
364  }
365  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
366  // reset the cache by filling it with benign (non-null) values
367  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
368  for (int i = 0; i < cache_size; i++)
369    _pc_descs[i] = initial_pc_desc;
370}
371
372PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
373  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries);
374  NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx);
375
376  // Note: one might think that caching the most recently
377  // read value separately would be a win, but one would be
378  // wrong.  When many threads are updating it, the cache
379  // line it's in would bounce between caches, negating
380  // any benefit.
381
382  // In order to prevent race conditions do not load cache elements
383  // repeatedly, but use a local copy:
384  PcDesc* res;
385
386  // Step one:  Check the most recently added value.
387  res = _pc_descs[0];
388  if (res == NULL) return NULL;  // native method; no PcDescs at all
389  if (match_desc(res, pc_offset, approximate)) {
390    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
391    return res;
392  }
393
394  // Step two:  Check the rest of the LRU cache.
395  for (int i = 1; i < cache_size; ++i) {
396    res = _pc_descs[i];
397    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
398    if (match_desc(res, pc_offset, approximate)) {
399      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
400      return res;
401    }
402  }
403
404  // Report failure.
405  return NULL;
406}
407
408void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
409  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
410  // Update the LRU cache by shifting pc_desc forward.
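  // e.g. with cache_size == 4, adding E to [A, B, C, D] leaves [E, A, B, C];
  // the oldest entry D simply falls off the end.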
411  for (int i = 0; i < cache_size; i++)  {
412    PcDesc* next = _pc_descs[i];
413    _pc_descs[i] = pc_desc;
414    pc_desc = next;
415  }
416}
417
418// adjust pcs_size so that it is a multiple of both oopSize and
419// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
420// of oopSize, then 2*sizeof(PcDesc) is)
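// For illustration: with oopSize == 8 and sizeof(PcDesc) == 20, a pcs_size of 60
// rounds up to 64, which is not a multiple of 20, so 60 + 20 = 80 is returned,
// a multiple of both 8 and 20.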
421static int adjust_pcs_size(int pcs_size) {
422  int nsize = round_to(pcs_size,   oopSize);
423  if ((nsize % sizeof(PcDesc)) != 0) {
424    nsize = pcs_size + sizeof(PcDesc);
425  }
426  assert((nsize % oopSize) == 0, "correct alignment");
427  return nsize;
428}
429
430//-----------------------------------------------------------------------------
431
432
433void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) {
434  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
435  assert(new_entry != NULL,"Must be non null");
436  assert(new_entry->next() == NULL, "Must be null");
437
438  if (exception_cache() != NULL) {
439    new_entry->set_next(exception_cache());
440  }
441  set_exception_cache(new_entry);
442}
443
444void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) {
445  ExceptionCache* prev = NULL;
446  ExceptionCache* curr = exception_cache();
447
448  while (curr != NULL) {
449    ExceptionCache* next = curr->next();
450
451    Klass* ex_klass = curr->exception_type();
452    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
453      if (prev == NULL) {
454        set_exception_cache(next);
455      } else {
456        prev->set_next(next);
457      }
458      delete curr;
459      // prev stays the same.
460    } else {
461      prev = curr;
462    }
463
464    curr = next;
465  }
466}
467
468// Public methods for accessing the exception cache.
469// Reads are done without a lock; updates take the ExceptionCache_lock.
470address nmethod::handler_for_exception_and_pc(Handle exception, address pc) {
471  // We never grab a lock to read the exception cache, so we may
472  // have false negatives. This is okay, as it can only happen during
473  // the first few exception lookups for a given nmethod.
474  ExceptionCache* ec = exception_cache();
475  while (ec != NULL) {
476    address ret_val;
477    if ((ret_val = ec->match(exception,pc)) != NULL) {
478      return ret_val;
479    }
480    ec = ec->next();
481  }
482  return NULL;
483}
484
485
486void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
487  // There are potential race conditions during exception cache updates, so we
488  // must own the ExceptionCache_lock before doing ANY modifications. Because
489  // we don't lock during reads, it is possible to have several threads attempt
490  // to update the cache with the same data. We need to check for already inserted
491  // copies of the current data before adding it.
492
493  MutexLocker ml(ExceptionCache_lock);
494  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
495
496  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
497    target_entry = new ExceptionCache(exception,pc,handler);
498    add_exception_cache_entry(target_entry);
499  }
500}
501
502
503//-------------end of code for ExceptionCache--------------
504
505
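// Sum of the individual section sizes below; note that the relocation, oops,
// metadata and dependencies sections are not included in this total.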
506int nmethod::total_size() const {
507  return
508    consts_size()        +
509    insts_size()         +
510    stub_size()          +
511    scopes_data_size()   +
512    scopes_pcs_size()    +
513    handler_table_size() +
514    nul_chk_table_size();
515}
516
517const char* nmethod::compile_kind() const {
518  if (is_osr_method())     return "osr";
519  if (method() != NULL && is_native_method())  return "c2n";
520  return NULL;
521}
522
523// Fill in default values for various flag fields
524void nmethod::init_defaults() {
525  _state                      = in_use;
526  _unloading_clock            = 0;
527  _marked_for_reclamation     = 0;
528  _has_flushed_dependencies   = 0;
529  _has_unsafe_access          = 0;
530  _has_method_handle_invokes  = 0;
531  _lazy_critical_native       = 0;
532  _has_wide_vectors           = 0;
533  _marked_for_deoptimization  = 0;
534  _lock_count                 = 0;
535  _stack_traversal_mark       = 0;
536  _unload_reported            = false; // jvmti state
537
538#ifdef ASSERT
539  _oops_are_stale             = false;
540#endif
541
542  _oops_do_mark_link       = NULL;
543  _jmethod_id              = NULL;
544  _osr_link                = NULL;
545  if (UseG1GC) {
546    _unloading_next        = NULL;
547  } else {
548    _scavenge_root_link    = NULL;
549  }
550  _scavenge_root_state     = 0;
551  _compiler                = NULL;
552#if INCLUDE_RTM_OPT
553  _rtm_state               = NoRTM;
554#endif
555#if INCLUDE_JVMCI
556  _jvmci_installed_code   = NULL;
557  _speculation_log        = NULL;
558#endif
559}
560
561nmethod* nmethod::new_native_nmethod(methodHandle method,
562  int compile_id,
563  CodeBuffer *code_buffer,
564  int vep_offset,
565  int frame_complete,
566  int frame_size,
567  ByteSize basic_lock_owner_sp_offset,
568  ByteSize basic_lock_sp_offset,
569  OopMapSet* oop_maps) {
570  code_buffer->finalize_oop_references(method);
571  // create nmethod
572  nmethod* nm = NULL;
573  {
574    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
575    int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
576    CodeOffsets offsets;
577    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
578    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
579    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
580                                            compile_id, &offsets,
581                                            code_buffer, frame_size,
582                                            basic_lock_owner_sp_offset,
583                                            basic_lock_sp_offset, oop_maps);
584    NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
585    if ((PrintAssembly || CompilerOracle::should_print(method)) && nm != NULL) {
586      Disassembler::decode(nm);
587    }
588  }
589  // verify nmethod
590  debug_only(if (nm) nm->verify();) // might block
591
592  if (nm != NULL) {
593    nm->log_new_nmethod();
594  }
595
596  return nm;
597}
598
599nmethod* nmethod::new_nmethod(methodHandle method,
600  int compile_id,
601  int entry_bci,
602  CodeOffsets* offsets,
603  int orig_pc_offset,
604  DebugInformationRecorder* debug_info,
605  Dependencies* dependencies,
606  CodeBuffer* code_buffer, int frame_size,
607  OopMapSet* oop_maps,
608  ExceptionHandlerTable* handler_table,
609  ImplicitExceptionTable* nul_chk_table,
610  AbstractCompiler* compiler,
611  int comp_level
612#if INCLUDE_JVMCI
613  , Handle installed_code,
614  Handle speculationLog
615#endif
616)
617{
618  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
619  code_buffer->finalize_oop_references(method);
620  // create nmethod
621  nmethod* nm = NULL;
622  { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
623    int nmethod_size =
624      allocation_size(code_buffer, sizeof(nmethod))
625      + adjust_pcs_size(debug_info->pcs_size())
626      + round_to(dependencies->size_in_bytes() , oopSize)
627      + round_to(handler_table->size_in_bytes(), oopSize)
628      + round_to(nul_chk_table->size_in_bytes(), oopSize)
629      + round_to(debug_info->data_size()       , oopSize);
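    // Note: each trailing section is padded to oopSize above; the nmethod
    // constructor rounds the corresponding offsets the same way, so the two
    // size computations stay in sync.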
630
631    nm = new (nmethod_size, comp_level)
632    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
633            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
634            oop_maps,
635            handler_table,
636            nul_chk_table,
637            compiler,
638            comp_level
639#if INCLUDE_JVMCI
640            , installed_code,
641            speculationLog
642#endif
643            );
644
645    if (nm != NULL) {
646      // To make dependency checking during class loading fast, record
647      // the nmethod dependencies in the classes it is dependent on.
648      // This allows the dependency checking code to simply walk the
649      // class hierarchy above the loaded class, checking only nmethods
650      // which are dependent on those classes.  The slow way is to
651      // check every nmethod for dependencies which makes it linear in
652      // the number of methods compiled.  For applications with a lot
653      // of classes the slow way is too slow.
654      for (Dependencies::DepStream deps(nm); deps.next(); ) {
655        if (deps.type() == Dependencies::call_site_target_value) {
656          // CallSite dependencies are managed on per-CallSite instance basis.
657          oop call_site = deps.argument_oop(0);
658          MethodHandles::add_dependent_nmethod(call_site, nm);
659        } else {
660          Klass* klass = deps.context_type();
661          if (klass == NULL) {
662            continue;  // ignore things like evol_method
663          }
664          // record this nmethod as dependent on this klass
665          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
666        }
667      }
668      NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
669      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
670        Disassembler::decode(nm);
671      }
672    }
673  }
674  // Do verification and logging outside CodeCache_lock.
675  if (nm != NULL) {
676    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
677    DEBUG_ONLY(nm->verify();)
678    nm->log_new_nmethod();
679  }
680  return nm;
681}
682
683#ifdef _MSC_VER
684#pragma warning(push)
685#pragma warning(disable:4355) //  warning C4355: 'this' : used in base member initializer list
686#endif
687// For native wrappers
688nmethod::nmethod(
689  Method* method,
690  int nmethod_size,
691  int compile_id,
692  CodeOffsets* offsets,
693  CodeBuffer* code_buffer,
694  int frame_size,
695  ByteSize basic_lock_owner_sp_offset,
696  ByteSize basic_lock_sp_offset,
697  OopMapSet* oop_maps )
698  : CodeBlob("native nmethod", code_buffer, sizeof(nmethod),
699             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
700  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
701  _native_basic_lock_sp_offset(basic_lock_sp_offset)
702{
703  {
704    debug_only(No_Safepoint_Verifier nsv;)
705    assert_locked_or_safepoint(CodeCache_lock);
706
707    init_defaults();
708    _method                  = method;
709    _entry_bci               = InvocationEntryBci;
710    // We have no exception handler or deopt handler; make the
711    // values something that will never match a pc, like the nmethod vtable entry
712    _exception_offset        = 0;
713    _deoptimize_offset       = 0;
714    _deoptimize_mh_offset    = 0;
715    _orig_pc_offset          = 0;
716
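    // Section offsets: a native wrapper carries no constants, stubs, debug info,
    // dependencies or exception tables, so most of the offsets below coincide.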
717    _consts_offset           = data_offset();
718    _stub_offset             = data_offset();
719    _oops_offset             = data_offset();
720    _metadata_offset         = _oops_offset         + round_to(code_buffer->total_oop_size(), oopSize);
721    _scopes_data_offset      = _metadata_offset     + round_to(code_buffer->total_metadata_size(), wordSize);
722    _scopes_pcs_offset       = _scopes_data_offset;
723    _dependencies_offset     = _scopes_pcs_offset;
724    _handler_table_offset    = _dependencies_offset;
725    _nul_chk_table_offset    = _handler_table_offset;
726    _nmethod_end_offset      = _nul_chk_table_offset;
727    _compile_id              = compile_id;
728    _comp_level              = CompLevel_none;
729    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
730    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
731    _osr_entry_point         = NULL;
732    _exception_cache         = NULL;
733    _pc_desc_cache.reset_to(NULL);
734    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
735
736    code_buffer->copy_values_to(this);
737    if (ScavengeRootsInCode) {
738      if (detect_scavenge_root_oops()) {
739        CodeCache::add_scavenge_root_nmethod(this);
740      }
741      Universe::heap()->register_nmethod(this);
742    }
743    debug_only(verify_scavenge_root_oops());
744    CodeCache::commit(this);
745  }
746
747  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
748    ttyLocker ttyl;  // keep the following output all in one block
749    // This output goes directly to the tty, not the compiler log.
750    // To enable tools to match it up with the compilation activity,
751    // be sure to tag this tty output with the compile ID.
752    if (xtty != NULL) {
753      xtty->begin_head("print_native_nmethod");
754      xtty->method(_method);
755      xtty->stamp();
756      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
757    }
758    // print the header part first
759    print();
760    // then print the requested information
761    if (PrintNativeNMethods) {
762      print_code();
763      if (oop_maps != NULL) {
764        oop_maps->print();
765      }
766    }
767    if (PrintRelocations) {
768      print_relocations();
769    }
770    if (xtty != NULL) {
771      xtty->tail("print_native_nmethod");
772    }
773  }
774}
775
776#ifdef _MSC_VER
777#pragma warning(pop)
778#endif
779
780void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
781  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
782}
783
784nmethod::nmethod(
785  Method* method,
786  int nmethod_size,
787  int compile_id,
788  int entry_bci,
789  CodeOffsets* offsets,
790  int orig_pc_offset,
791  DebugInformationRecorder* debug_info,
792  Dependencies* dependencies,
793  CodeBuffer *code_buffer,
794  int frame_size,
795  OopMapSet* oop_maps,
796  ExceptionHandlerTable* handler_table,
797  ImplicitExceptionTable* nul_chk_table,
798  AbstractCompiler* compiler,
799  int comp_level
800#if INCLUDE_JVMCI
801  , Handle installed_code,
802  Handle speculation_log
803#endif
804  )
805  : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
806             nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
807  _native_receiver_sp_offset(in_ByteSize(-1)),
808  _native_basic_lock_sp_offset(in_ByteSize(-1))
809{
810  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
811  {
812    debug_only(No_Safepoint_Verifier nsv;)
813    assert_locked_or_safepoint(CodeCache_lock);
814
815    init_defaults();
816    _method                  = method;
817    _entry_bci               = entry_bci;
818    _compile_id              = compile_id;
819    _comp_level              = comp_level;
820    _compiler                = compiler;
821    _orig_pc_offset          = orig_pc_offset;
822    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
823
824    // Section offsets
825    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
826    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
827
828#if INCLUDE_JVMCI
829    _jvmci_installed_code = installed_code();
830    _speculation_log = (instanceOop)speculation_log();
831
832    if (compiler->is_jvmci()) {
833      // JVMCI might not produce any stub sections
834      if (offsets->value(CodeOffsets::Exceptions) != -1) {
835        _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
836      } else {
837        _exception_offset = -1;
838      }
839      if (offsets->value(CodeOffsets::Deopt) != -1) {
840        _deoptimize_offset       = code_offset()          + offsets->value(CodeOffsets::Deopt);
841      } else {
842        _deoptimize_offset = -1;
843      }
844      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
845        _deoptimize_mh_offset  = code_offset()          + offsets->value(CodeOffsets::DeoptMH);
846      } else {
847        _deoptimize_mh_offset  = -1;
848      }
849    } else {
850#endif
851    // Exception handler and deopt handler are in the stub section
852    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
853    assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
854
855    _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
856    _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
857    if (offsets->value(CodeOffsets::DeoptMH) != -1) {
858      _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
859    } else {
860      _deoptimize_mh_offset  = -1;
861#if INCLUDE_JVMCI
862    }
863#endif
864    }
865    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
866      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
867    } else {
868      _unwind_handler_offset = -1;
869    }
870
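    // Layout of the data section that follows the code:
    // [oops][metadata][scopes data][scopes pcs][dependencies][handler table][nul-chk table]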
871    _oops_offset             = data_offset();
872    _metadata_offset         = _oops_offset          + round_to(code_buffer->total_oop_size(), oopSize);
873    _scopes_data_offset      = _metadata_offset      + round_to(code_buffer->total_metadata_size(), wordSize);
874
875    _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size       (), oopSize);
876    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
877    _handler_table_offset    = _dependencies_offset  + round_to(dependencies->size_in_bytes (), oopSize);
878    _nul_chk_table_offset    = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize);
879    _nmethod_end_offset      = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize);
880
881    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
882    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
883    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
884    _exception_cache         = NULL;
885    _pc_desc_cache.reset_to(scopes_pcs_begin());
886
887    // Copy contents of ScopeDescRecorder to nmethod
888    code_buffer->copy_values_to(this);
889    debug_info->copy_to(this);
890    dependencies->copy_to(this);
891    if (ScavengeRootsInCode) {
892      if (detect_scavenge_root_oops()) {
893        CodeCache::add_scavenge_root_nmethod(this);
894      }
895      Universe::heap()->register_nmethod(this);
896    }
897    debug_only(verify_scavenge_root_oops());
898
899    CodeCache::commit(this);
900
901    // Copy contents of ExceptionHandlerTable to nmethod
902    handler_table->copy_to(this);
903    nul_chk_table->copy_to(this);
904
905    // Use the entry point information to find out whether a method is
906    // static or non-static.
907    assert(compiler->is_c2() || compiler->is_jvmci() ||
908           _method->is_static() == (entry_point() == _verified_entry_point),
909           " entry points must be same for static methods and vice versa");
910  }
911
912  bool printnmethods = PrintNMethods || PrintNMethodsAtLevel == _comp_level
913    || CompilerOracle::should_print(_method)
914    || CompilerOracle::has_option_string(_method, "PrintNMethods");
915  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
916    print_nmethod(printnmethods);
917  }
918}
919
920// Print a short set of xml attributes to identify this nmethod.  The
921// output should be embedded in some other element.
922void nmethod::log_identity(xmlStream* log) const {
923  log->print(" compile_id='%d'", compile_id());
924  const char* nm_kind = compile_kind();
925  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
926  if (compiler() != NULL) {
927    log->print(" compiler='%s'", compiler()->name());
928  }
929  if (TieredCompilation) {
930    log->print(" level='%d'", comp_level());
931  }
932}
933
934
935#define LOG_OFFSET(log, name)                    \
936  if (p2i(name##_end()) - p2i(name##_begin())) \
937    log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'"    , \
938               p2i(name##_begin()) - p2i(this))
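// e.g. LOG_OFFSET(xtty, insts) emits  insts_offset='<insts_begin - this>'  and
// prints nothing when the section is empty.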
939
940
941void nmethod::log_new_nmethod() const {
942  if (LogCompilation && xtty != NULL) {
943    ttyLocker ttyl;
944    HandleMark hm;
945    xtty->begin_elem("nmethod");
946    log_identity(xtty);
947    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
948    xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));
949
950    LOG_OFFSET(xtty, relocation);
951    LOG_OFFSET(xtty, consts);
952    LOG_OFFSET(xtty, insts);
953    LOG_OFFSET(xtty, stub);
954    LOG_OFFSET(xtty, scopes_data);
955    LOG_OFFSET(xtty, scopes_pcs);
956    LOG_OFFSET(xtty, dependencies);
957    LOG_OFFSET(xtty, handler_table);
958    LOG_OFFSET(xtty, nul_chk_table);
959    LOG_OFFSET(xtty, oops);
960    LOG_OFFSET(xtty, metadata);
961
962    xtty->method(method());
963    xtty->stamp();
964    xtty->end_elem();
965  }
966}
967
968#undef LOG_OFFSET
969
970
971// Print out more verbose output usually for a newly created nmethod.
972void nmethod::print_on(outputStream* st, const char* msg) const {
973  if (st != NULL) {
974    ttyLocker ttyl;
975    if (WizardMode) {
976      CompileTask::print(st, this, msg, /*short_form:*/ true);
977      st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
978    } else {
979      CompileTask::print(st, this, msg, /*short_form:*/ false);
980    }
981  }
982}
983
984
985void nmethod::print_nmethod(bool printmethod) {
986  ttyLocker ttyl;  // keep the following output all in one block
987  if (xtty != NULL) {
988    xtty->begin_head("print_nmethod");
989    xtty->stamp();
990    xtty->end_head();
991  }
992  // print the header part first
993  print();
994  // then print the requested information
995  if (printmethod) {
996    print_code();
997    print_pcs();
998    if (oop_maps()) {
999      oop_maps()->print();
1000    }
1001  }
1002  if (PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
1003    print_scopes();
1004  }
1005  if (PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
1006    print_relocations();
1007  }
1008  if (PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
1009    print_dependencies();
1010  }
1011  if (PrintExceptionHandlers) {
1012    print_handler_table();
1013    print_nul_chk_table();
1014  }
1015  if (xtty != NULL) {
1016    xtty->tail("print_nmethod");
1017  }
1018}
1019
1020
1021// Promote one word from an assembly-time handle to a live embedded oop.
1022inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
1023  if (handle == NULL ||
1024      // As a special case, IC oops are initialized to 1 or -1.
1025      handle == (jobject) Universe::non_oop_word()) {
1026    (*dest) = (oop) handle;
1027  } else {
1028    (*dest) = JNIHandles::resolve_non_null(handle);
1029  }
1030}
1031
1032
1033// Have to have the same name because it's called by a template
1034void nmethod::copy_values(GrowableArray<jobject>* array) {
1035  int length = array->length();
1036  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
1037  oop* dest = oops_begin();
1038  for (int index = 0 ; index < length; index++) {
1039    initialize_immediate_oop(&dest[index], array->at(index));
1040  }
1041
1042  // Now we can fix up all the oops in the code.  We need to do this
1043  // in the code because the assembler uses jobjects as placeholders.
1044  // The code and relocations have already been initialized by the
1045  // CodeBlob constructor, so it is valid even at this early point to
1046  // iterate over relocations and patch the code.
1047  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
1048}
1049
1050void nmethod::copy_values(GrowableArray<Metadata*>* array) {
1051  int length = array->length();
1052  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
1053  Metadata** dest = metadata_begin();
1054  for (int index = 0 ; index < length; index++) {
1055    dest[index] = array->at(index);
1056  }
1057}
1058
1059bool nmethod::is_at_poll_return(address pc) {
1060  RelocIterator iter(this, pc, pc+1);
1061  while (iter.next()) {
1062    if (iter.type() == relocInfo::poll_return_type)
1063      return true;
1064  }
1065  return false;
1066}
1067
1068
1069bool nmethod::is_at_poll_or_poll_return(address pc) {
1070  RelocIterator iter(this, pc, pc+1);
1071  while (iter.next()) {
1072    relocInfo::relocType t = iter.type();
1073    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
1074      return true;
1075  }
1076  return false;
1077}
1078
1079
1080void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
1081  // re-patch all oop-bearing instructions, just in case some oops moved
1082  RelocIterator iter(this, begin, end);
1083  while (iter.next()) {
1084    if (iter.type() == relocInfo::oop_type) {
1085      oop_Relocation* reloc = iter.oop_reloc();
1086      if (initialize_immediates && reloc->oop_is_immediate()) {
1087        oop* dest = reloc->oop_addr();
1088        initialize_immediate_oop(dest, (jobject) *dest);
1089      }
1090      // Refresh the oop-related bits of this instruction.
1091      reloc->fix_oop_relocation();
1092    } else if (iter.type() == relocInfo::metadata_type) {
1093      metadata_Relocation* reloc = iter.metadata_reloc();
1094      reloc->fix_metadata_relocation();
1095    }
1096  }
1097}
1098
1099
1100void nmethod::verify_oop_relocations() {
1101  // Ensure that the code matches the current oop values
1102  RelocIterator iter(this, NULL, NULL);
1103  while (iter.next()) {
1104    if (iter.type() == relocInfo::oop_type) {
1105      oop_Relocation* reloc = iter.oop_reloc();
1106      if (!reloc->oop_is_immediate()) {
1107        reloc->verify_oop_relocation();
1108      }
1109    }
1110  }
1111}
1112
1113
1114ScopeDesc* nmethod::scope_desc_at(address pc) {
1115  PcDesc* pd = pc_desc_at(pc);
1116  guarantee(pd != NULL, "scope must be present");
1117  return new ScopeDesc(this, pd->scope_decode_offset(),
1118                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
1119                       pd->return_oop());
1120}
1121
1122
1123void nmethod::clear_inline_caches() {
1124  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
1125  if (is_zombie()) {
1126    return;
1127  }
1128
1129  RelocIterator iter(this);
1130  while (iter.next()) {
1131    iter.reloc()->clear_inline_cache();
1132  }
1133}
1134
1135// Clear ICStubs of all compiled ICs
1136void nmethod::clear_ic_stubs() {
1137  assert_locked_or_safepoint(CompiledIC_lock);
1138  RelocIterator iter(this);
1139  while(iter.next()) {
1140    if (iter.type() == relocInfo::virtual_call_type) {
1141      CompiledIC* ic = CompiledIC_at(&iter);
1142      ic->clear_ic_stub();
1143    }
1144  }
1145}
1146
1147
1148void nmethod::cleanup_inline_caches() {
1149  assert_locked_or_safepoint(CompiledIC_lock);
1150
1151  // If the method is not_entrant or zombie, a JMP has been plastered over the
1152  // first few bytes.  If an oop was embedded in that old code, it
1153  // should not get GC'd.  Skip the first few bytes when scanning
1154  // not-entrant methods.
1155  address low_boundary = verified_entry_point();
1156  if (!is_in_use()) {
1157    low_boundary += NativeJump::instruction_size;
1158    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1159    // This means that the low_boundary is going to be a little too high.
1160    // This shouldn't matter, since oops of non-entrant methods are never used.
1161    // In fact, why are we bothering to look at oops in a non-entrant method??
1162  }
1163
1164  // Find all calls in an nmethod and clear the ones that point to non-entrant,
1165  // zombie and unloaded nmethods.
1166  ResourceMark rm;
1167  RelocIterator iter(this, low_boundary);
1168  while(iter.next()) {
1169    switch(iter.type()) {
1170      case relocInfo::virtual_call_type:
1171      case relocInfo::opt_virtual_call_type: {
1172        CompiledIC *ic = CompiledIC_at(&iter);
1173        // OK to look up references to zombies here
1174        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
1175        if( cb != NULL && cb->is_nmethod() ) {
1176          nmethod* nm = (nmethod*)cb;
1177          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
1178          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
1179        }
1180        break;
1181      }
1182      case relocInfo::static_call_type: {
1183        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
1184        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
1185        if( cb != NULL && cb->is_nmethod() ) {
1186          nmethod* nm = (nmethod*)cb;
1187          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
1188          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
1189        }
1190        break;
1191      }
1192    }
1193  }
1194}
1195
1196void nmethod::verify_clean_inline_caches() {
1197  assert_locked_or_safepoint(CompiledIC_lock);
1198
1199  // If the method is not_entrant or zombie, a JMP has been plastered over the
1200  // first few bytes.  If an oop was embedded in that old code, it
1201  // should not get GC'd.  Skip the first few bytes when scanning
1202  // not-entrant methods.
1203  address low_boundary = verified_entry_point();
1204  if (!is_in_use()) {
1205    low_boundary += NativeJump::instruction_size;
1206    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1207    // This means that the low_boundary is going to be a little too high.
1208    // This shouldn't matter, since oops of non-entrant methods are never used.
1209    // In fact, why are we bothering to look at oops in a non-entrant method??
1210  }
1211
1212  ResourceMark rm;
1213  RelocIterator iter(this, low_boundary);
1214  while(iter.next()) {
1215    switch(iter.type()) {
1216      case relocInfo::virtual_call_type:
1217      case relocInfo::opt_virtual_call_type: {
1218        CompiledIC *ic = CompiledIC_at(&iter);
1219        // OK to look up references to zombies here
1220        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
1221        if( cb != NULL && cb->is_nmethod() ) {
1222          nmethod* nm = (nmethod*)cb;
1223          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
1224          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1225            assert(ic->is_clean(), "IC should be clean");
1226          }
1227        }
1228        break;
1229      }
1230      case relocInfo::static_call_type: {
1231        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
1232        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
1233        if( cb != NULL && cb->is_nmethod() ) {
1234          nmethod* nm = (nmethod*)cb;
1235          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
1236          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1237            assert(csc->is_clean(), "IC should be clean");
1238          }
1239        }
1240        break;
1241      }
1242    }
1243  }
1244}
1245
1246int nmethod::verify_icholder_relocations() {
1247  int count = 0;
1248
1249  RelocIterator iter(this);
1250  while(iter.next()) {
1251    if (iter.type() == relocInfo::virtual_call_type) {
1252      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
1253        CompiledIC *ic = CompiledIC_at(&iter);
1254        if (TraceCompiledIC) {
1255          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
1256          ic->print();
1257        }
1258        assert(ic->cached_icholder() != NULL, "must be non-NULL");
1259        count++;
1260      }
1261    }
1262  }
1263
1264  return count;
1265}
1266
1267// This is a private interface with the sweeper.
1268void nmethod::mark_as_seen_on_stack() {
1269  assert(is_alive(), "Must be an alive method");
1270  // Set the traversal mark to ensure that the sweeper does 2
1271  // cleaning passes before moving to zombie.
1272  set_stack_traversal_mark(NMethodSweeper::traversal_count());
1273}
1274
1275// Tell if a non-entrant method can be converted to a zombie (i.e.,
1276// there are no activations on the stack, not in use by the VM,
1277// and not in use by the ServiceThread)
1278bool nmethod::can_convert_to_zombie() {
1279  assert(is_not_entrant(), "must be a non-entrant method");
1280
1281  // Since the nmethod sweeper only does partial sweep the sweeper's traversal
1282  // count can be greater than the stack traversal count before it hits the
1283  // nmethod for the second time.
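  // In other words, the sweeper must have completed at least two traversals past
  // the mark recorded by mark_as_seen_on_stack(); e.g. a mark of 5 allows the
  // transition only once traversal_count() reaches 7.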
1284  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
1285         !is_locked_by_vm();
1286}
1287
1288void nmethod::inc_decompile_count() {
1289  if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
1290  // Could be gated by ProfileTraps, but do not bother...
1291  Method* m = method();
1292  if (m == NULL)  return;
1293  MethodData* mdo = m->method_data();
1294  if (mdo == NULL)  return;
1295  // There is a benign race here.  See comments in methodData.hpp.
1296  mdo->inc_decompile_count();
1297}
1298
1299void nmethod::increase_unloading_clock() {
1300  _global_unloading_clock++;
1301  if (_global_unloading_clock == 0) {
1302    // _nmethods are allocated with _unloading_clock == 0,
1303    // so 0 is never used as a clock value.
1304    _global_unloading_clock = 1;
1305  }
1306}
1307
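// The unloading clock may be updated and read by different threads during
// nmethod unloading, hence the release_store / load_acquire pairing below.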
1308void nmethod::set_unloading_clock(unsigned char unloading_clock) {
1309  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
1310}
1311
1312unsigned char nmethod::unloading_clock() {
1313  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
1314}
1315
1316void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
1317
1318  post_compiled_method_unload();
1319
1320  // Since this nmethod is being unloaded, make sure that dependencies
1321  // recorded in instanceKlasses get flushed, and pass a non-NULL closure to
1322  // indicate that this work is being done during a GC.
1323  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
1324  assert(is_alive != NULL, "Should be non-NULL");
1325  // A non-NULL is_alive closure indicates that this is being called during GC.
1326  flush_dependencies(is_alive);
1327
1328  // Break cycle between nmethod & method
1329  if (TraceClassUnloading && WizardMode) {
1330    tty->print_cr("[Class unloading: Making nmethod " INTPTR_FORMAT
1331                  " unloadable], Method*(" INTPTR_FORMAT
1332                  "), cause(" INTPTR_FORMAT ")",
1333                  p2i(this), p2i(_method), p2i(cause));
1334    if (!Universe::heap()->is_gc_active())
1335      cause->klass()->print();
1336  }
1337  // Unlink the osr method, so we do not look this up again
1338  if (is_osr_method()) {
1339    invalidate_osr_method();
1340  }
1341  // If _method is already NULL the Method* is about to be unloaded,
1342  // so we don't have to break the cycle. Note that it is possible to
1343  // have the Method* live here, in case we unload the nmethod because
1344  // it is pointing to some oop (other than the Method*) being unloaded.
1345  if (_method != NULL) {
1346    // OSR methods point to the Method*, but the Method* does not
1347    // point back!
1348    if (_method->code() == this) {
1349      _method->clear_code(); // Break a cycle
1350    }
1351    _method = NULL;            // Clear the method of this dead nmethod
1352  }
1353
1354  // Make the class unloaded - i.e., change state and notify sweeper
1355  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1356  if (is_in_use()) {
1357    // Transitioning directly from live to unloaded -- so
1358    // we need to force a cache clean-up; remember this
1359    // for later on.
1360    CodeCache::set_needs_cache_clean(true);
1361  }
1362
1363  // Unregister must be done before the state change
1364  Universe::heap()->unregister_nmethod(this);
1365
1366#if INCLUDE_JVMCI
1367  // The method can only be unloaded after the pointer to the installed code
1368  // Java wrapper is no longer alive. Here we need to clear out this weak
1369  // reference to the dead object. Nulling out the reference has to happen
1370  // after the method is unregistered since the original value may be still
1371  // tracked by the rset.
1372  if (_jvmci_installed_code != NULL) {
1373    InstalledCode::set_address(_jvmci_installed_code, 0);
1374    _jvmci_installed_code = NULL;
1375  }
1376#endif
1377
1378  _state = unloaded;
1379
1380  // Log the unloading.
1381  log_state_change();
1382
1383  // The Method* is gone at this point
1384  assert(_method == NULL, "Tautology");
1385
1386  set_osr_link(NULL);
1387  //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
1388  NMethodSweeper::report_state_change(this);
1389}
1390
1391void nmethod::invalidate_osr_method() {
1392  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1393  // Remove from list of active nmethods
1394  if (method() != NULL)
1395    method()->method_holder()->remove_osr_nmethod(this);
1396}
1397
1398void nmethod::log_state_change() const {
1399  if (LogCompilation) {
1400    if (xtty != NULL) {
1401      ttyLocker ttyl;  // keep the following output all in one block
1402      if (_state == unloaded) {
1403        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1404                         os::current_thread_id());
1405      } else {
1406        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1407                         os::current_thread_id(),
1408                         (_state == zombie ? " zombie='1'" : ""));
1409      }
1410      log_identity(xtty);
1411      xtty->stamp();
1412      xtty->end_elem();
1413    }
1414  }
1415  if (PrintCompilation && _state != unloaded) {
1416    print_on(tty, _state == zombie ? "made zombie" : "made not entrant");
1417  }
1418}
1419
1420/**
1421 * Common functionality for both make_not_entrant and make_zombie
1422 */
1423bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
1424  assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1425  assert(!is_zombie(), "should not already be a zombie");
1426
1427  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
1428  nmethodLocker nml(this);
1429  methodHandle the_method(method());
1430  No_Safepoint_Verifier nsv;
1431
1432  // During patching, depending on the nmethod state, we must notify the GC that
1433  // code has been unloaded, unregistering it. We cannot do this right away while
1434  // holding the Patching_lock because we need to use the CodeCache_lock. This
1435  // would be prone to deadlocks.
1436  // This flag is used to remember whether we need to later lock and unregister.
1437  bool nmethod_needs_unregister = false;
1438
1439  {
1440    // invalidate osr nmethod before acquiring the patching lock since
1441    // they both acquire leaf locks and we don't want a deadlock.
1442    // This logic is equivalent to the logic below for patching the
1443    // verified entry point of regular methods.
1444    if (is_osr_method()) {
1445      // this effectively makes the osr nmethod not entrant
1446      invalidate_osr_method();
1447    }
1448
1449    // Enter critical section.  Does not block for safepoint.
1450    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
1451
1452    if (_state == state) {
1453      // another thread already performed this transition so nothing
1454      // to do, but return false to indicate this.
1455      return false;
1456    }
1457
1458    // The caller can be calling the method statically or through an inline
1459    // cache call.
1460    if (!is_osr_method() && !is_not_entrant()) {
1461      NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1462                  SharedRuntime::get_handle_wrong_method_stub());
1463    }
1464
1465    if (is_in_use()) {
1466      // It's a true state change, so mark the method as decompiled.
1467      // Do it only for transition from alive.
1468      inc_decompile_count();
1469    }
1470
1471    // If the state is becoming a zombie, signal to unregister the nmethod with
1472    // the heap.
1473    // This nmethod may have already been unloaded during a full GC.
1474    if ((state == zombie) && !is_unloaded()) {
1475      nmethod_needs_unregister = true;
1476    }
1477
1478    // Must happen before state change. Otherwise we have a race condition in
1479    // nmethod::can_convert_to_zombie(). I.e., a method can immediately
1480    // transition its state from 'not_entrant' to 'zombie' without having to wait
1481    // for stack scanning.
1482    if (state == not_entrant) {
1483      mark_as_seen_on_stack();
1484      OrderAccess::storestore();
1485    }
1486
1487    // Change state
1488    _state = state;
1489
1490    // Log the transition once
1491    log_state_change();
1492
1493    // Remove nmethod from method.
1494    // We need to check if both the _code and _from_compiled_code_entry_point
1495    // refer to this nmethod because there is a race in setting these two fields
1496    // in Method* as seen in bugid 4947125.
1497    // If the vep() points to the zombie nmethod, the memory for the nmethod
1498    // could be flushed and the compiler and vtable stubs could still call
1499    // through it.
1500    if (method() != NULL && (method()->code() == this ||
1501                             method()->from_compiled_entry() == verified_entry_point())) {
1502      HandleMark hm;
1503      method()->clear_code();
1504    }
1505  } // leave critical region under Patching_lock
1506
1507  // When the nmethod becomes zombie it is no longer alive so the
1508  // dependencies must be flushed.  nmethods in the not_entrant
1509  // state will be flushed later when the transition to zombie
1510  // happens or they get unloaded.
1511  if (state == zombie) {
1512    {
1513      // Flushing dependencies must be done before any possible
1514      // safepoint can sneak in, otherwise the oops used by the
1515      // dependency logic could have become stale.
1516      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1517      if (nmethod_needs_unregister) {
1518        Universe::heap()->unregister_nmethod(this);
1519      }
1520      flush_dependencies(NULL);
1521    }
1522
1523    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1524    // event and it hasn't already been reported for this nmethod then
1525    // report it now. (The event may have been reported earlier if the GC
1526    // marked it for unloading.) JvmtiDeferredEventQueue support means
1527    // we no longer go to a safepoint here.
1528    post_compiled_method_unload();
1529
1530#ifdef ASSERT
1531    // It's no longer safe to access the oops section since zombie
1532    // nmethods aren't scanned for GC.
1533    _oops_are_stale = true;
1534#endif
1535     // the Method may be reclaimed by class unloading now that the
1536     // nmethod is in zombie state
1537    set_method(NULL);
1538  } else {
1539    assert(state == not_entrant, "other cases may need to be handled differently");
1540  }
1541#if INCLUDE_JVMCI
1542  if (_jvmci_installed_code != NULL) {
1543    // Break the link between nmethod and InstalledCode such that the nmethod can subsequently be flushed safely.
1544    InstalledCode::set_address(_jvmci_installed_code, 0);
1545  }
1546#endif
1547
1548  if (TraceCreateZombies) {
1549    ResourceMark m;
1550    tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie");
1551  }
1552
1553  NMethodSweeper::report_state_change(this);
1554  return true;
1555}
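
// The header comment above ("Common functionality for both make_not_entrant
// and make_zombie") implies two thin wrappers around this routine.  A minimal
// sketch of the expected call sites (the wrapper bodies are an assumption;
// the real declarations live in nmethod.hpp):
//
//   bool nmethod::make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
//   bool nmethod::make_zombie()      { return make_not_entrant_or_zombie(zombie); }
//
// Either wrapper returns false when another thread has already performed the
// requested state transition.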
1556
1557void nmethod::flush() {
1558  // Note that there are no valid oops in the nmethod anymore.
1559  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
1560  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
1561
1562  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1563  assert_locked_or_safepoint(CodeCache_lock);
1564
1565  // completely deallocate this method
1566  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
1567  if (PrintMethodFlushing) {
1568    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
1569                  "/Free CodeCache:" SIZE_FORMAT "Kb",
1570                  _compile_id, p2i(this), CodeCache::nof_blobs(),
1571                  CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
1572  }
1573
1574  // We need to deallocate any ExceptionCache data.
1575  // Note that we do not need to grab the nmethod lock for this, it
1576  // better be thread safe if we're disposing of it!
1577  ExceptionCache* ec = exception_cache();
1578  set_exception_cache(NULL);
1579  while(ec != NULL) {
1580    ExceptionCache* next = ec->next();
1581    delete ec;
1582    ec = next;
1583  }
1584
1585  if (on_scavenge_root_list()) {
1586    CodeCache::drop_scavenge_root_nmethod(this);
1587  }
1588
1589#ifdef SHARK
1590  ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
1591#endif // SHARK
1592
1593  ((CodeBlob*)(this))->flush();
1594
1595  CodeCache::free(this);
1596}
1597
1598//
1599// Notify all classes this nmethod is dependent on that it is no
1600// longer dependent. This should only be called in two situations.
1601// First, when an nmethod transitions to a zombie all dependents need
1602// to be cleared.  Since zombification happens at a safepoint there are no
1603// synchronization issues.  The second place is a little more tricky.
1604// During phase 1 of mark sweep, class unloading may happen and as a
1605// result some nmethods may get unloaded.  In this case the flushing
1606// of dependencies must happen during phase 1 since after GC any
1607// dependencies in the unloaded nmethod won't be updated, so
1608// traversing the dependency information is unsafe.  In that case this
1609// function is called with a non-NULL argument and this function only
1610// notifies instanceKlasses that are reachable.
1611
1612void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
1613  assert_locked_or_safepoint(CodeCache_lock);
1614  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
1615  "is_alive is non-NULL if and only if we are called during GC");
1616  if (!has_flushed_dependencies()) {
1617    set_has_flushed_dependencies();
1618    for (Dependencies::DepStream deps(this); deps.next(); ) {
1619      if (deps.type() == Dependencies::call_site_target_value) {
1620        // CallSite dependencies are managed on a per-CallSite instance basis.
1621        oop call_site = deps.argument_oop(0);
1622        MethodHandles::remove_dependent_nmethod(call_site, this);
1623      } else {
1624        Klass* klass = deps.context_type();
1625        if (klass == NULL) {
1626          continue;  // ignore things like evol_method
1627        }
1628        // During GC the is_alive closure is non-NULL, and is used to
1629        // determine liveness of dependees that need to be updated.
1630        if (is_alive == NULL || klass->is_loader_alive(is_alive)) {
1631          InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1632        }
1633      }
1634    }
1635  }
1636}
1637
1638
1639// If this oop is not live, the nmethod can be unloaded.
1640bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
1641  assert(root != NULL, "just checking");
1642  oop obj = *root;
1643  if (obj == NULL || is_alive->do_object_b(obj)) {
1644      return false;
1645  }
1646
1647  // If ScavengeRootsInCode is true, an nmethod might be unloaded
1648  // simply because one of its constant oops has gone dead.
1649  // No actual classes need to be unloaded in order for this to occur.
1650  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
1651  make_unloaded(is_alive, obj);
1652  return true;
1653}
1654
1655// ------------------------------------------------------------------
1656// post_compiled_method_load_event
1657// new method for install_code() path
1658// Transfer information from compilation to jvmti
1659void nmethod::post_compiled_method_load_event() {
1660
1661  Method* moop = method();
1662  HOTSPOT_COMPILED_METHOD_LOAD(
1663      (char *) moop->klass_name()->bytes(),
1664      moop->klass_name()->utf8_length(),
1665      (char *) moop->name()->bytes(),
1666      moop->name()->utf8_length(),
1667      (char *) moop->signature()->bytes(),
1668      moop->signature()->utf8_length(),
1669      insts_begin(), insts_size());
1670
1671  if (JvmtiExport::should_post_compiled_method_load() ||
1672      JvmtiExport::should_post_compiled_method_unload()) {
1673    get_and_cache_jmethod_id();
1674  }
1675
1676  if (JvmtiExport::should_post_compiled_method_load()) {
1677    // Let the Service thread (which is a real Java thread) post the event
1678    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1679    JvmtiDeferredEventQueue::enqueue(
1680      JvmtiDeferredEvent::compiled_method_load_event(this));
1681  }
1682}
1683
1684jmethodID nmethod::get_and_cache_jmethod_id() {
1685  if (_jmethod_id == NULL) {
1686    // Cache the jmethod_id since it can no longer be looked up once the
1687    // method itself has been marked for unloading.
1688    _jmethod_id = method()->jmethod_id();
1689  }
1690  return _jmethod_id;
1691}
1692
1693void nmethod::post_compiled_method_unload() {
1694  if (unload_reported()) {
1695    // During unloading we transition to unloaded and then to zombie
1696    // and the unloading is reported during the first transition.
1697    return;
1698  }
1699
1700  assert(_method != NULL && !is_unloaded(), "just checking");
1701  DTRACE_METHOD_UNLOAD_PROBE(method());
1702
1703  // If a JVMTI agent has enabled the CompiledMethodUnload event then
1704  // post the event. Sometime later this nmethod will be made a zombie
1705  // by the sweeper but the Method* will not be valid at that point.
1706  // If the _jmethod_id is null then no load event was ever requested
1707  // so don't bother posting the unload.  The main reason for this is
1708  // that the jmethodID is a weak reference to the Method* so if
1709  // it's being unloaded there's no way to look it up since the weak
1710  // ref will have been cleared.
1711  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
1712    assert(!unload_reported(), "already unloaded");
1713    JvmtiDeferredEvent event =
1714      JvmtiDeferredEvent::compiled_method_unload_event(this,
1715          _jmethod_id, insts_begin());
1716    if (SafepointSynchronize::is_at_safepoint()) {
1717      // Don't want to take the queueing lock. Add it as pending and
1718      // it will get enqueued later.
1719      JvmtiDeferredEventQueue::add_pending_event(event);
1720    } else {
1721      MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1722      JvmtiDeferredEventQueue::enqueue(event);
1723    }
1724  }
1725
1726  // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1727  // any time. As the nmethod is being unloaded now we mark it as
1728  // having the unload event reported - this will ensure that we don't
1729  // attempt to report the event in the unlikely scenario where the
1730  // event is enabled at the time the nmethod is made a zombie.
1731  set_unload_reported();
1732}
1733
1734static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
1735  if (ic->is_icholder_call()) {
1736    // The only exception is compiledICHolder oops which may
1737    // yet be marked below. (We check this further below).
1738    CompiledICHolder* cichk_oop = ic->cached_icholder();
1739
1740    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
1741        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
1742      return;
1743    }
1744  } else {
1745    Metadata* ic_oop = ic->cached_metadata();
1746    if (ic_oop != NULL) {
1747      if (ic_oop->is_klass()) {
1748        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
1749          return;
1750        }
1751      } else if (ic_oop->is_method()) {
1752        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
1753          return;
1754        }
1755      } else {
1756        ShouldNotReachHere();
1757      }
1758    }
1759  }
1760
1761  ic->set_to_clean();
1762}
1763
1764// This is called at the end of the strong tracing/marking phase of a
1765// GC to unload an nmethod if it contains otherwise unreachable
1766// oops.
1767
1768void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
1769  // Make sure the oop's ready to receive visitors
1770  assert(!is_zombie() && !is_unloaded(),
1771         "should not call follow on zombie or unloaded nmethod");
1772
1773  // If the method is not entrant then a JMP is plastered over the
1774  // first few bytes.  If an oop in the old code was there, that oop
1775  // should not get GC'd.  Skip the first few bytes of oops on
1776  // not-entrant methods.
1777  address low_boundary = verified_entry_point();
1778  if (is_not_entrant()) {
1779    low_boundary += NativeJump::instruction_size;
1780    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1781    // (See comment above.)
1782  }
1783
1784  // The RedefineClasses() API can cause the class unloading invariant
1785  // to no longer be true. See jvmtiExport.hpp for details.
1786  // Also, leave a debugging breadcrumb in local flag.
1787  if (JvmtiExport::has_redefined_a_class()) {
1788    // This set of the unloading_occurred flag is done before the
1789    // call to post_compiled_method_unload() so that the unloading
1790    // of this nmethod is reported.
1791    unloading_occurred = true;
1792  }
1793
1794  // Exception cache
1795  clean_exception_cache(is_alive);
1796
1797  // If class unloading occurred we first iterate over all inline caches and
1798  // clear ICs where the cached oop is referring to an unloaded klass or method.
1799  // The remaining live cached oops will be traversed in the relocInfo::oop_type
1800  // iteration below.
1801  if (unloading_occurred) {
1802    RelocIterator iter(this, low_boundary);
1803    while(iter.next()) {
1804      if (iter.type() == relocInfo::virtual_call_type) {
1805        CompiledIC *ic = CompiledIC_at(&iter);
1806        clean_ic_if_metadata_is_dead(ic, is_alive);
1807      }
1808    }
1809  }
1810
1811  // Compiled code
1812  {
1813  RelocIterator iter(this, low_boundary);
1814  while (iter.next()) {
1815    if (iter.type() == relocInfo::oop_type) {
1816      oop_Relocation* r = iter.oop_reloc();
1817      // In this loop, we must only traverse those oops directly embedded in
1818      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1819      assert(1 == (r->oop_is_immediate()) +
1820                  (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1821             "oop must be found in exactly one place");
1822      if (r->oop_is_immediate() && r->oop_value() != NULL) {
1823        if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1824          return;
1825        }
1826      }
1827    }
1828  }
1829  }
1830
1831
1832  // Scopes
1833  for (oop* p = oops_begin(); p < oops_end(); p++) {
1834    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1835    if (can_unload(is_alive, p, unloading_occurred)) {
1836      return;
1837    }
1838  }
1839
1840#if INCLUDE_JVMCI
1841  // Follow JVMCI method
1842  BarrierSet* bs = Universe::heap()->barrier_set();
1843  if (_jvmci_installed_code != NULL) {
1844    if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
1845      if (!is_alive->do_object_b(_jvmci_installed_code)) {
1846        bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
1847        _jvmci_installed_code = NULL;
1848        bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
1849      }
1850    } else {
1851      if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
1852        return;
1853      }
1854    }
1855  }
1856
1857  if (_speculation_log != NULL) {
1858    if (!is_alive->do_object_b(_speculation_log)) {
1859      bs->write_ref_nmethod_pre(&_speculation_log, this);
1860      _speculation_log = NULL;
1861      bs->write_ref_nmethod_post(&_speculation_log, this);
1862    }
1863  }
1864#endif
1865
1866
1867  // Ensure that all metadata is still alive
1868  verify_metadata_loaders(low_boundary, is_alive);
1869}
1870
1871template <class CompiledICorStaticCall>
1872static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, nmethod* from) {
1873  // OK to look up references to zombies here
1874  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
1875  if (cb != NULL && cb->is_nmethod()) {
1876    nmethod* nm = (nmethod*)cb;
1877
1878    if (nm->unloading_clock() != nmethod::global_unloading_clock()) {
1879      // The nmethod has not been processed yet.
1880      return true;
1881    }
1882
1883    // Clean inline caches pointing to both zombie and not_entrant methods
1884    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
1885      ic->set_to_clean();
1886      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
1887    }
1888  }
1889
1890  return false;
1891}
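
// Note on the unloading-clock check above: if the target nmethod's
// unloading_clock() lags nmethod::global_unloading_clock(), that nmethod has
// not yet been visited in the current parallel unloading pass, so cleaning its
// inline cache is postponed.  A sketch of how a caller is expected to retry,
// mirroring do_unloading_parallel()/do_unloading_parallel_postponed() below
// (illustrative control flow only):
//
//   bool postponed = nm->do_unloading_parallel(is_alive, unloading_occurred);
//   // ... once every nmethod has been visited and its clock advanced ...
//   if (postponed) {
//     nm->do_unloading_parallel_postponed(is_alive, unloading_occurred);
//   }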
1892
1893static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, nmethod* from) {
1894  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
1895}
1896
1897static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, nmethod* from) {
1898  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
1899}
1900
1901bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
1902  assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
1903
1904  oop_Relocation* r = iter_at_oop->oop_reloc();
1905  // Traverse those oops directly embedded in the code.
1906  // Other oops (oop_index>0) are seen as part of scopes_oops.
1907  assert(1 == (r->oop_is_immediate()) +
1908         (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1909         "oop must be found in exactly one place");
1910  if (r->oop_is_immediate() && r->oop_value() != NULL) {
1911    // Unload this nmethod if the oop is dead.
1912    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
1913      return true;
1914    }
1915  }
1916
1917  return false;
1918}
1919
1920
1921bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
1922  ResourceMark rm;
1923
1924  // Make sure the oop's ready to receive visitors
1925  assert(!is_zombie() && !is_unloaded(),
1926         "should not call follow on zombie or unloaded nmethod");
1927
1928  // If the method is not entrant then a JMP is plastered over the
1929  // first few bytes.  If an oop in the old code was there, that oop
1930  // should not get GC'd.  Skip the first few bytes of oops on
1931  // not-entrant methods.
1932  address low_boundary = verified_entry_point();
1933  if (is_not_entrant()) {
1934    low_boundary += NativeJump::instruction_size;
1935    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
1936    // (See comment above.)
1937  }
1938
1939  // The RedefineClasses() API can cause the class unloading invariant
1940  // to no longer be true. See jvmtiExport.hpp for details.
1941  // Also, leave a debugging breadcrumb in local flag.
1942  if (JvmtiExport::has_redefined_a_class()) {
1943    // This set of the unloading_occurred flag is done before the
1944    // call to post_compiled_method_unload() so that the unloading
1945    // of this nmethod is reported.
1946    unloading_occurred = true;
1947  }
1948
1949#if INCLUDE_JVMCI
1950  // Follow JVMCI method
1951  if (_jvmci_installed_code != NULL) {
1952    if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
1953      if (!is_alive->do_object_b(_jvmci_installed_code)) {
1954        _jvmci_installed_code = NULL;
1955      }
1956    } else {
1957      if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
1958        return false;
1959      }
1960    }
1961  }
1962
1963  if (_speculation_log != NULL) {
1964    if (!is_alive->do_object_b(_speculation_log)) {
1965      _speculation_log = NULL;
1966    }
1967  }
1968#endif
1969
1970  // Exception cache
1971  clean_exception_cache(is_alive);
1972
1973  bool is_unloaded = false;
1974  bool postponed = false;
1975
1976  RelocIterator iter(this, low_boundary);
1977  while(iter.next()) {
1978
1979    switch (iter.type()) {
1980
1981    case relocInfo::virtual_call_type:
1982      if (unloading_occurred) {
1983        // If class unloading occurred we first iterate over all inline caches and
1984        // clear ICs where the cached oop is referring to an unloaded klass or method.
1985        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
1986      }
1987
1988      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1989      break;
1990
1991    case relocInfo::opt_virtual_call_type:
1992      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
1993      break;
1994
1995    case relocInfo::static_call_type:
1996      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
1997      break;
1998
1999    case relocInfo::oop_type:
2000      if (!is_unloaded) {
2001        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
2002      }
2003      break;
2004
2005    case relocInfo::metadata_type:
2006      break; // nothing to do.
2007    }
2008  }
2009
2010  if (is_unloaded) {
2011    return postponed;
2012  }
2013
2014  // Scopes
2015  for (oop* p = oops_begin(); p < oops_end(); p++) {
2016    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2017    if (can_unload(is_alive, p, unloading_occurred)) {
2018      is_unloaded = true;
2019      break;
2020    }
2021  }
2022
2023  if (is_unloaded) {
2024    return postponed;
2025  }
2026
2027#if INCLUDE_JVMCI
2028  // Follow JVMCI method
2029  BarrierSet* bs = Universe::heap()->barrier_set();
2030  if (_jvmci_installed_code != NULL) {
2031    if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
2032      if (!is_alive->do_object_b(_jvmci_installed_code)) {
2033        bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
2034        _jvmci_installed_code = NULL;
2035        bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
2036      }
2037    } else {
2038      if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
2039        is_unloaded = true;
2040      }
2041    }
2042  }
2043
2044  if (_speculation_log != NULL) {
2045    if (!is_alive->do_object_b(_speculation_log)) {
2046      bs->write_ref_nmethod_pre(&_speculation_log, this);
2047      _speculation_log = NULL;
2048      bs->write_ref_nmethod_post(&_speculation_log, this);
2049    }
2050  }
2051#endif
2052
2053  // Ensure that all metadata is still alive
2054  verify_metadata_loaders(low_boundary, is_alive);
2055
2056  return postponed;
2057}
2058
2059void nmethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
2060  ResourceMark rm;
2061
2062  // Make sure the oop's ready to receive visitors
2063  assert(!is_zombie(),
2064         "should not call follow on zombie nmethod");
2065
2066  // If the method is not entrant then a JMP is plastered over the
2067  // first few bytes.  If an oop in the old code was there, that oop
2068  // should not get GC'd.  Skip the first few bytes of oops on
2069  // not-entrant methods.
2070  address low_boundary = verified_entry_point();
2071  if (is_not_entrant()) {
2072    low_boundary += NativeJump::instruction_size;
2073    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2074    // (See comment above.)
2075  }
2076
2077  RelocIterator iter(this, low_boundary);
2078  while(iter.next()) {
2079
2080    switch (iter.type()) {
2081
2082    case relocInfo::virtual_call_type:
2083      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
2084      break;
2085
2086    case relocInfo::opt_virtual_call_type:
2087      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
2088      break;
2089
2090    case relocInfo::static_call_type:
2091      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
2092      break;
2093    }
2094  }
2095}
2096
2097#ifdef ASSERT
2098
2099class CheckClass : AllStatic {
2100  static BoolObjectClosure* _is_alive;
2101
2102  // Check class_loader is alive for this bit of metadata.
2103  static void check_class(Metadata* md) {
2104    Klass* klass = NULL;
2105    if (md->is_klass()) {
2106      klass = ((Klass*)md);
2107    } else if (md->is_method()) {
2108      klass = ((Method*)md)->method_holder();
2109    } else if (md->is_methodData()) {
2110      klass = ((MethodData*)md)->method()->method_holder();
2111    } else {
2112      md->print();
2113      ShouldNotReachHere();
2114    }
2115    assert(klass->is_loader_alive(_is_alive), "must be alive");
2116  }
2117 public:
2118  static void do_check_class(BoolObjectClosure* is_alive, nmethod* nm) {
2119    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
2120    _is_alive = is_alive;
2121    nm->metadata_do(check_class);
2122  }
2123};
2124
2125// This is called during a safepoint so can use static data
2126BoolObjectClosure* CheckClass::_is_alive = NULL;
2127#endif // ASSERT
2128
2129
2130// Processing of oop references should have been sufficient to keep
2131// all strong references alive.  Any weak references should have been
2132// cleared as well.  Visit all the metadata and ensure that it's
2133// really alive.
2134void nmethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
2135#ifdef ASSERT
2136    RelocIterator iter(this, low_boundary);
2137    while (iter.next()) {
2138    // static_stub_Relocations may have dangling references to
2139    // Method*s so trim them out here.  Otherwise it looks like
2140    // compiled code is maintaining a link to dead metadata.
2141    address static_call_addr = NULL;
2142    if (iter.type() == relocInfo::opt_virtual_call_type) {
2143      CompiledIC* cic = CompiledIC_at(&iter);
2144      if (!cic->is_call_to_interpreted()) {
2145        static_call_addr = iter.addr();
2146      }
2147    } else if (iter.type() == relocInfo::static_call_type) {
2148      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
2149      if (!csc->is_call_to_interpreted()) {
2150        static_call_addr = iter.addr();
2151      }
2152    }
2153    if (static_call_addr != NULL) {
2154      RelocIterator sciter(this, low_boundary);
2155      while (sciter.next()) {
2156        if (sciter.type() == relocInfo::static_stub_type &&
2157            sciter.static_stub_reloc()->static_call() == static_call_addr) {
2158          sciter.static_stub_reloc()->clear_inline_cache();
2159        }
2160      }
2161    }
2162  }
2163  // Check that the metadata embedded in the nmethod is alive
2164  CheckClass::do_check_class(is_alive, this);
2165#endif
2166}
2167
2168
2169// Iterate over metadata calling this function.   Used by RedefineClasses
2170void nmethod::metadata_do(void f(Metadata*)) {
2171  address low_boundary = verified_entry_point();
2172  if (is_not_entrant()) {
2173    low_boundary += NativeJump::instruction_size;
2174    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2175    // (See comment above.)
2176  }
2177  {
2178    // Visit all immediate references that are embedded in the instruction stream.
2179    RelocIterator iter(this, low_boundary);
2180    while (iter.next()) {
2181      if (iter.type() == relocInfo::metadata_type ) {
2182        metadata_Relocation* r = iter.metadata_reloc();
2183        // In this loop, we must only follow those metadatas directly embedded in
2184        // the code.  Other metadatas (oop_index>0) are seen as part of
2185        // the metadata section below.
2186        assert(1 == (r->metadata_is_immediate()) +
2187               (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
2188               "metadata must be found in exactly one place");
2189        if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
2190          Metadata* md = r->metadata_value();
2191          if (md != _method) f(md);
2192        }
2193      } else if (iter.type() == relocInfo::virtual_call_type) {
2194        // Check compiledIC holders associated with this nmethod
2195        CompiledIC *ic = CompiledIC_at(&iter);
2196        if (ic->is_icholder_call()) {
2197          CompiledICHolder* cichk = ic->cached_icholder();
2198          f(cichk->holder_method());
2199          f(cichk->holder_klass());
2200        } else {
2201          Metadata* ic_oop = ic->cached_metadata();
2202          if (ic_oop != NULL) {
2203            f(ic_oop);
2204          }
2205        }
2206      }
2207    }
2208  }
2209
2210  // Visit the metadata section
2211  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2212    if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
2213    Metadata* md = *p;
2214    f(md);
2215  }
2216
2217  // Visit metadata not embedded in the other places.
2218  if (_method != NULL) f(_method);
2219}
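
// A minimal usage sketch for metadata_do(), in the spirit of its
// RedefineClasses caller (the callback below is hypothetical; the real caller
// records each Metadata* so it is not deallocated while old compiled code
// still refers to it):
//
//   static void mark_metadata_in_use(Metadata* md) {
//     // hypothetical bookkeeping step for 'md'
//   }
//   ...
//   nm->metadata_do(mark_metadata_in_use);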
2220
2221void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
2222  // make sure the oops are ready to receive visitors
2223  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
2224  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
2225
2226  // If the method is not entrant or zombie then a JMP is plastered over the
2227  // first few bytes.  If an oop in the old code was there, that oop
2228  // should not get GC'd.  Skip the first few bytes of oops on
2229  // not-entrant methods.
2230  address low_boundary = verified_entry_point();
2231  if (is_not_entrant()) {
2232    low_boundary += NativeJump::instruction_size;
2233    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
2234    // (See comment above.)
2235  }
2236
2237#if INCLUDE_JVMCI
2238  if (_jvmci_installed_code != NULL) {
2239    f->do_oop((oop*) &_jvmci_installed_code);
2240  }
2241  if (_speculation_log != NULL) {
2242    f->do_oop((oop*) &_speculation_log);
2243  }
2244#endif
2245
2246  RelocIterator iter(this, low_boundary);
2247
2248  while (iter.next()) {
2249    if (iter.type() == relocInfo::oop_type ) {
2250      oop_Relocation* r = iter.oop_reloc();
2251      // In this loop, we must only follow those oops directly embedded in
2252      // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
2253      assert(1 == (r->oop_is_immediate()) +
2254                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
2255             "oop must be found in exactly one place");
2256      if (r->oop_is_immediate() && r->oop_value() != NULL) {
2257        f->do_oop(r->oop_addr());
2258      }
2259    }
2260  }
2261
2262  // Scopes
2263  // This includes oop constants not inlined in the code stream.
2264  for (oop* p = oops_begin(); p < oops_end(); p++) {
2265    if (*p == Universe::non_oop_word())  continue;  // skip non-oops
2266    f->do_oop(p);
2267  }
2268}
2269
2270#define NMETHOD_SENTINEL ((nmethod*)badAddress)
2271
2272nmethod* volatile nmethod::_oops_do_mark_nmethods;
2273
2274// An nmethod is "marked" if its _mark_link is set non-null.
2275// Even if it is the end of the linked list, it will have a non-null link value,
2276// as long as it is on the list.
2277// This code must be MP safe, because it is used from parallel GC passes.
2278bool nmethod::test_set_oops_do_mark() {
2279  assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
2280  nmethod* observed_mark_link = _oops_do_mark_link;
2281  if (observed_mark_link == NULL) {
2282    // Claim this nmethod for this thread to mark.
2283    observed_mark_link = (nmethod*)
2284      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
2285    if (observed_mark_link == NULL) {
2286
2287      // Atomically append this nmethod (now claimed) to the head of the list:
2288      nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
2289      for (;;) {
2290        nmethod* required_mark_nmethods = observed_mark_nmethods;
2291        _oops_do_mark_link = required_mark_nmethods;
2292        observed_mark_nmethods = (nmethod*)
2293          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
2294        if (observed_mark_nmethods == required_mark_nmethods)
2295          break;
2296      }
2297      // Mark was clear when we first saw this guy.
2298      NOT_PRODUCT(if (TraceScavenge)  print_on(tty, "oops_do, mark"));
2299      return false;
2300    }
2301  }
2302  // On fall through, another racing thread marked this nmethod before we did.
2303  return true;
2304}
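
// A usage sketch of the claim protocol above, as seen from a GC oop closure
// (the closure type and field name are hypothetical):
//
//   void NMethodMarkingClosure::visit(nmethod* nm) {
//     if (!nm->test_set_oops_do_mark()) {  // false means this thread claimed it
//       nm->oops_do(_keep_alive);          // so each nmethod's oops are visited once
//     }
//   }
//
// oops_do_marking_prologue() and oops_do_marking_epilogue() below bracket the
// whole marking cycle and clear the per-nmethod mark links afterwards.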
2305
2306void nmethod::oops_do_marking_prologue() {
2307  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("[oops_do_marking_prologue"));
2308  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
2309  // We use cmpxchg_ptr instead of regular assignment here because the user
2310  // may fork a bunch of threads, and we need them all to see the same state.
2311  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
2312  guarantee(observed == NULL, "no races in this sequential code");
2313}
2314
2315void nmethod::oops_do_marking_epilogue() {
2316  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
2317  nmethod* cur = _oops_do_mark_nmethods;
2318  while (cur != NMETHOD_SENTINEL) {
2319    assert(cur != NULL, "not NULL-terminated");
2320    nmethod* next = cur->_oops_do_mark_link;
2321    cur->_oops_do_mark_link = NULL;
2322    cur->verify_oop_relocations();
2323    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
2324    cur = next;
2325  }
2326  void* required = _oops_do_mark_nmethods;
2327  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
2328  guarantee(observed == required, "no races in this sequential code");
2329  NOT_PRODUCT(if (TraceScavenge)  tty->print_cr("oops_do_marking_epilogue]"));
2330}
2331
2332class DetectScavengeRoot: public OopClosure {
2333  bool     _detected_scavenge_root;
2334public:
2335  DetectScavengeRoot() : _detected_scavenge_root(false)
2336  { NOT_PRODUCT(_print_nm = NULL); }
2337  bool detected_scavenge_root() { return _detected_scavenge_root; }
2338  virtual void do_oop(oop* p) {
2339    if ((*p) != NULL && (*p)->is_scavengable()) {
2340      NOT_PRODUCT(maybe_print(p));
2341      _detected_scavenge_root = true;
2342    }
2343  }
2344  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2345
2346#ifndef PRODUCT
2347  nmethod* _print_nm;
2348  void maybe_print(oop* p) {
2349    if (_print_nm == NULL)  return;
2350    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
2351    tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
2352                  p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
2353                  p2i(*p), p2i(p));
2354    (*p)->print();
2355  }
2356#endif //PRODUCT
2357};
2358
2359bool nmethod::detect_scavenge_root_oops() {
2360  DetectScavengeRoot detect_scavenge_root;
2361  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
2362  oops_do(&detect_scavenge_root);
2363  return detect_scavenge_root.detected_scavenge_root();
2364}
2365
2366// Method that knows how to preserve outgoing arguments at call. This method must be
2367// called with a frame corresponding to a Java invoke
2368void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
2369#ifndef SHARK
2370  if (method() != NULL && !method()->is_native()) {
2371    SimpleScopeDesc ssd(this, fr.pc());
2372    Bytecode_invoke call(ssd.method(), ssd.bci());
2373    bool has_receiver = call.has_receiver();
2374    bool has_appendix = call.has_appendix();
2375    Symbol* signature = call.signature();
2376    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
2377  }
2378#endif // !SHARK
2379}
2380
2381inline bool includes(void* p, void* from, void* to) {
2382  return from <= p && p < to;
2383}
2384
2385
2386void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2387  assert(count >= 2, "must be sentinel values, at least");
2388
2389#ifdef ASSERT
2390  // must be sorted and unique; we do a binary search in find_pc_desc()
2391  int prev_offset = pcs[0].pc_offset();
2392  assert(prev_offset == PcDesc::lower_offset_limit,
2393         "must start with a sentinel");
2394  for (int i = 1; i < count; i++) {
2395    int this_offset = pcs[i].pc_offset();
2396    assert(this_offset > prev_offset, "offsets must be sorted");
2397    prev_offset = this_offset;
2398  }
2399  assert(prev_offset == PcDesc::upper_offset_limit,
2400         "must end with a sentinel");
2401#endif //ASSERT
2402
2403  // Search for MethodHandle invokes and tag the nmethod.
2404  for (int i = 0; i < count; i++) {
2405    if (pcs[i].is_method_handle_invoke()) {
2406      set_has_method_handle_invokes(true);
2407      break;
2408    }
2409  }
2410  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");
2411
2412  int size = count * sizeof(PcDesc);
2413  assert(scopes_pcs_size() >= size, "oob");
2414  memcpy(scopes_pcs_begin(), pcs, size);
2415
2416  // Adjust the final sentinel downward.
2417  PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2418  assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2419  last_pc->set_pc_offset(content_size() + 1);
2420  for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2421    // Fill any rounding gaps with copies of the last record.
2422    last_pc[1] = last_pc[0];
2423  }
2424  // The following assert could fail if sizeof(PcDesc) is not
2425  // an integral multiple of oopSize (the rounding term).
2426  // If it fails, change the logic to always allocate a multiple
2427  // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2428  assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2429}
2430
2431void nmethod::copy_scopes_data(u_char* buffer, int size) {
2432  assert(scopes_data_size() >= size, "oob");
2433  memcpy(scopes_data_begin(), buffer, size);
2434}
2435
2436// When using JVMCI the address might be off by the size of a call instruction.
2437bool nmethod::is_deopt_entry(address pc) {
2438  return pc == deopt_handler_begin()
2439#if INCLUDE_JVMCI
2440    || pc == (deopt_handler_begin() + NativeCall::instruction_size)
2441#endif
2442    ;
2443}
2444
2445#ifdef ASSERT
2446static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
2447  PcDesc* lower = nm->scopes_pcs_begin();
2448  PcDesc* upper = nm->scopes_pcs_end();
2449  lower += 1; // exclude initial sentinel
2450  PcDesc* res = NULL;
2451  for (PcDesc* p = lower; p < upper; p++) {
2452    NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2453    if (match_desc(p, pc_offset, approximate)) {
2454      if (res == NULL)
2455        res = p;
2456      else
2457        res = (PcDesc*) badAddress;
2458    }
2459  }
2460  return res;
2461}
2462#endif
2463
2464
2465// Finds a PcDesc with real-pc equal to "pc"
2466PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) {
2467  address base_address = code_begin();
2468  if ((pc < base_address) ||
2469      (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2470    return NULL;  // PC is wildly out of range
2471  }
2472  int pc_offset = (int) (pc - base_address);
2473
2474  // Check the PcDesc cache if it contains the desired PcDesc
2475  // (This has an almost 100% hit rate.)
2476  PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2477  if (res != NULL) {
2478    assert(res == linear_search(this, pc_offset, approximate), "cache ok");
2479    return res;
2480  }
2481
2482  // Fallback algorithm: quasi-linear search for the PcDesc
2483  // Find the last pc_offset less than the given offset.
2484  // The successor must be the required match, if there is a match at all.
2485  // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2486  PcDesc* lower = scopes_pcs_begin();
2487  PcDesc* upper = scopes_pcs_end();
2488  upper -= 1; // exclude final sentinel
2489  if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2490
2491#define assert_LU_OK \
2492  /* invariant on lower..upper during the following search: */ \
2493  assert(lower->pc_offset() <  pc_offset, "sanity"); \
2494  assert(upper->pc_offset() >= pc_offset, "sanity")
2495  assert_LU_OK;
2496
2497  // Use the last successful return as a split point.
2498  PcDesc* mid = _pc_desc_cache.last_pc_desc();
2499  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2500  if (mid->pc_offset() < pc_offset) {
2501    lower = mid;
2502  } else {
2503    upper = mid;
2504  }
2505
2506  // Take giant steps at first (4096, then 256, then 16, then 1)
2507  const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2508  const int RADIX = (1 << LOG2_RADIX);
2509  for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2510    while ((mid = lower + step) < upper) {
2511      assert_LU_OK;
2512      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2513      if (mid->pc_offset() < pc_offset) {
2514        lower = mid;
2515      } else {
2516        upper = mid;
2517        break;
2518      }
2519    }
2520    assert_LU_OK;
2521  }
2522
2523  // Sneak up on the value with a linear search of length ~16.
2524  while (true) {
2525    assert_LU_OK;
2526    mid = lower + 1;
2527    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2528    if (mid->pc_offset() < pc_offset) {
2529      lower = mid;
2530    } else {
2531      upper = mid;
2532      break;
2533    }
2534  }
2535#undef assert_LU_OK
2536
2537  if (match_desc(upper, pc_offset, approximate)) {
2538    assert(upper == linear_search(this, pc_offset, approximate), "search ok");
2539    _pc_desc_cache.add_pc_desc(upper);
2540    return upper;
2541  } else {
2542    assert(NULL == linear_search(this, pc_offset, approximate), "search ok");
2543    return NULL;
2544  }
2545}
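
// A worked example of the search above, assuming the product-build value
// LOG2_RADIX == 4 (debug builds use 3): the outer loop probes with strides of
// 1 << 12 = 4096, 1 << 8 = 256 and 1 << 4 = 16 PcDescs.  Each stride level
// advances 'lower' at most about RADIX (16) times before the stride shrinks,
// and the final linear sweep examines roughly 16 more entries, so a miss in
// the PcDesc cache still costs only a few dozen comparisons.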
2546
2547
2548void nmethod::check_all_dependencies(DepChange& changes) {
2549  // Checked dependencies are allocated into this ResourceMark
2550  ResourceMark rm;
2551
2552  // Turn off dependency tracing while actually testing dependencies.
2553  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2554
2555  typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2556                            &DependencySignature::equals, 11027> DepTable;
2557
2558  DepTable* table = new DepTable();
2559
2560  // Iterate over live nmethods and check dependencies of all nmethods that are not
2561  // marked for deoptimization. A particular dependency is only checked once.
2562  NMethodIterator iter;
2563  while(iter.next()) {
2564    nmethod* nm = iter.method();
2565    // Only notify for live nmethods
2566    if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
2567      for (Dependencies::DepStream deps(nm); deps.next(); ) {
2568        // Construct abstraction of a dependency.
2569        DependencySignature* current_sig = new DependencySignature(deps);
2570
2571        // Determine if dependency is already checked. table->put(...) returns
2572        // 'true' if the dependency is added (i.e., was not in the hashtable).
2573        if (table->put(*current_sig, 1)) {
2574          if (deps.check_dependency() != NULL) {
2575            // Dependency checking failed. Print out information about the failed
2576            // dependency and finally fail with an assert. We can fail here, since
2577            // dependency checking is never done in a product build.
2578            tty->print_cr("Failed dependency:");
2579            changes.print();
2580            nm->print();
2581            nm->print_dependencies();
2582            assert(false, "Should have been marked for deoptimization");
2583          }
2584        }
2585      }
2586    }
2587  }
2588}
2589
2590bool nmethod::check_dependency_on(DepChange& changes) {
2591  // What has happened:
2592  // 1) a new class dependee has been added
2593  // 2) dependee and all its super classes have been marked
2594  bool found_check = false;  // set true if we are upset
2595  for (Dependencies::DepStream deps(this); deps.next(); ) {
2596    // Evaluate only relevant dependencies.
2597    if (deps.spot_check_dependency_at(changes) != NULL) {
2598      found_check = true;
2599      NOT_DEBUG(break);
2600    }
2601  }
2602  return found_check;
2603}
2604
2605bool nmethod::is_evol_dependent_on(Klass* dependee) {
2606  InstanceKlass *dependee_ik = InstanceKlass::cast(dependee);
2607  Array<Method*>* dependee_methods = dependee_ik->methods();
2608  for (Dependencies::DepStream deps(this); deps.next(); ) {
2609    if (deps.type() == Dependencies::evol_method) {
2610      Method* method = deps.method_argument(0);
2611      for (int j = 0; j < dependee_methods->length(); j++) {
2612        if (dependee_methods->at(j) == method) {
2613          // RC_TRACE macro has an embedded ResourceMark
2614          RC_TRACE(0x01000000,
2615            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)",
2616            _method->method_holder()->external_name(),
2617            _method->name()->as_C_string(),
2618            _method->signature()->as_C_string(), compile_id(),
2619            method->method_holder()->external_name(),
2620            method->name()->as_C_string(),
2621            method->signature()->as_C_string()));
2622          if (TraceDependencies || LogCompilation)
2623            deps.log_dependency(dependee);
2624          return true;
2625        }
2626      }
2627    }
2628  }
2629  return false;
2630}
2631
2632// Called from mark_for_deoptimization, when dependee is invalidated.
2633bool nmethod::is_dependent_on_method(Method* dependee) {
2634  for (Dependencies::DepStream deps(this); deps.next(); ) {
2635    if (deps.type() != Dependencies::evol_method)
2636      continue;
2637    Method* method = deps.method_argument(0);
2638    if (method == dependee) return true;
2639  }
2640  return false;
2641}
2642
2643
2644bool nmethod::is_patchable_at(address instr_addr) {
2645  assert(insts_contains(instr_addr), "wrong nmethod used");
2646  if (is_zombie()) {
2647    // a zombie may never be patched
2648    return false;
2649  }
2650  return true;
2651}
2652
2653
2654address nmethod::continuation_for_implicit_exception(address pc) {
2655  // Exception happened outside inline-cache check code => we are inside
2656  // an active nmethod => use cpc to determine a return address
2657  int exception_offset = pc - code_begin();
2658  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
2659#ifdef ASSERT
2660  if (cont_offset == 0) {
2661    Thread* thread = ThreadLocalStorage::get_thread_slow();
2662    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
2663    HandleMark hm(thread);
2664    ResourceMark rm(thread);
2665    CodeBlob* cb = CodeCache::find_blob(pc);
2666    assert(cb != NULL && cb == this, "");
2667    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
2668    print();
2669    method()->print_codes();
2670    print_code();
2671    print_pcs();
2672  }
2673#endif
2674  if (cont_offset == 0) {
2675    // Let the normal error handling report the exception
2676    return NULL;
2677  }
2678  return code_begin() + cont_offset;
2679}
2680
2681
2682
2683void nmethod_init() {
2684  // make sure you didn't forget to adjust the filler fields
2685  assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2686}
2687
2688
2689//-------------------------------------------------------------------------------------------
2690
2691
2692// QQQ might we make this work from a frame??
2693nmethodLocker::nmethodLocker(address pc) {
2694  CodeBlob* cb = CodeCache::find_blob(pc);
2695  guarantee(cb != NULL && cb->is_nmethod(), "bad pc for a nmethod found");
2696  _nm = (nmethod*)cb;
2697  lock_nmethod(_nm);
2698}
2699
2700// Only JvmtiDeferredEvent::compiled_method_unload_event()
2701// should pass zombie_ok == true.
2702void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
2703  if (nm == NULL)  return;
2704  Atomic::inc(&nm->_lock_count);
2705  assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
2706}
2707
2708void nmethodLocker::unlock_nmethod(nmethod* nm) {
2709  if (nm == NULL)  return;
2710  Atomic::dec(&nm->_lock_count);
2711  assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2712}
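
// Illustrative RAII usage of nmethodLocker (a sketch; 'pc' is assumed to point
// into compiled code owned by some nmethod):
//
//   {
//     nmethodLocker nml(pc);   // finds the owning nmethod and bumps _lock_count
//     ...                      // the nmethod cannot be flushed while locked
//   }                          // the destructor calls unlock_nmethod()
//
// make_not_entrant_or_zombie() above relies on the same mechanism, via
// 'nmethodLocker nml(this)', to keep the nmethod from being flushed across a
// possible safepoint.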
2713
2714// -----------------------------------------------------------------------------
2715// nmethod::get_deopt_original_pc
2716//
2717// Return the original PC for the given PC if:
2718// (a) the given PC belongs to a nmethod and
2719// (b) it is a deopt PC
2720address nmethod::get_deopt_original_pc(const frame* fr) {
2721  if (fr->cb() == NULL)  return NULL;
2722
2723  nmethod* nm = fr->cb()->as_nmethod_or_null();
2724  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
2725    return nm->get_original_pc(fr);
2726
2727  return NULL;
2728}
2729
2730
2731// -----------------------------------------------------------------------------
2732// MethodHandle
2733
2734bool nmethod::is_method_handle_return(address return_pc) {
2735  if (!has_method_handle_invokes())  return false;
2736  PcDesc* pd = pc_desc_at(return_pc);
2737  if (pd == NULL)
2738    return false;
2739  return pd->is_method_handle_invoke();
2740}
2741
2742
2743// -----------------------------------------------------------------------------
2744// Verification
2745
2746class VerifyOopsClosure: public OopClosure {
2747  nmethod* _nm;
2748  bool     _ok;
2749public:
2750  VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2751  bool ok() { return _ok; }
2752  virtual void do_oop(oop* p) {
2753    if ((*p) == NULL || (*p)->is_oop())  return;
2754    if (_ok) {
2755      _nm->print_nmethod(true);
2756      _ok = false;
2757    }
2758    tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2759                  p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2760  }
2761  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2762};
2763
2764void nmethod::verify() {
2765
2766  // Hmm. That OSR methods can be deopted but not marked as zombie or not_entrant
2767  // seems odd.
2768
2769  if (is_zombie() || is_not_entrant() || is_unloaded())
2770    return;
2771
2772  // Make sure all the entry points are correctly aligned for patching.
2773  NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2774
2775  // assert(method()->is_oop(), "must be valid");
2776
2777  ResourceMark rm;
2778
2779  if (!CodeCache::contains(this)) {
2780    fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2781  }
2782
2783  if (is_native_method())
2784    return;
2785
2786  nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2787  if (nm != this) {
2788    fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2789  }
2790
2791  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2792    if (! p->verify(this)) {
2793      tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2794    }
2795  }
2796
2797  VerifyOopsClosure voc(this);
2798  oops_do(&voc);
2799  assert(voc.ok(), "embedded oops must be OK");
2800  verify_scavenge_root_oops();
2801
2802  verify_scopes();
2803}
2804
2805
2806void nmethod::verify_interrupt_point(address call_site) {
2807  // Verify IC only when nmethod installation is finished.
2808  bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
2809                      || !this->is_in_use();     // nmethod is installed, but not in 'in_use' state
2810  if (is_installed) {
2811    Thread *cur = Thread::current();
2812    if (CompiledIC_lock->owner() == cur ||
2813        ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) &&
2814         SafepointSynchronize::is_at_safepoint())) {
2815      CompiledIC_at(this, call_site);
2816      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
2817    } else {
2818      MutexLocker ml_verify (CompiledIC_lock);
2819      CompiledIC_at(this, call_site);
2820    }
2821  }
2822
2823  PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2824  assert(pd != NULL, "PcDesc must exist");
2825  for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
2826                                     pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
2827                                     pd->return_oop());
2828       !sd->is_top(); sd = sd->sender()) {
2829    sd->verify();
2830  }
2831}
2832
2833void nmethod::verify_scopes() {
2834  if( !method() ) return;       // Runtime stubs have no scope
2835  if (method()->is_native()) return; // Ignore stub methods.
2836  // iterate through all interrupt points
2837  // and verify the debug information is valid.
2838  RelocIterator iter((nmethod*)this);
2839  while (iter.next()) {
2840    address stub = NULL;
2841    switch (iter.type()) {
2842      case relocInfo::virtual_call_type:
2843        verify_interrupt_point(iter.addr());
2844        break;
2845      case relocInfo::opt_virtual_call_type:
2846        stub = iter.opt_virtual_call_reloc()->static_stub();
2847        verify_interrupt_point(iter.addr());
2848        break;
2849      case relocInfo::static_call_type:
2850        stub = iter.static_call_reloc()->static_stub();
2851        //verify_interrupt_point(iter.addr());
2852        break;
2853      case relocInfo::runtime_call_type:
2854        address destination = iter.reloc()->value();
2855        // Right now there is no way to find out which entries support
2856        // an interrupt point.  It would be nice if we had this
2857        // information in a table.
2858        break;
2859    }
2860    assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2861  }
2862}
2863
2864
2865// -----------------------------------------------------------------------------
2866// Non-product code
2867#ifndef PRODUCT
2868
2869class DebugScavengeRoot: public OopClosure {
2870  nmethod* _nm;
2871  bool     _ok;
2872public:
2873  DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
2874  bool ok() { return _ok; }
2875  virtual void do_oop(oop* p) {
2876    if ((*p) == NULL || !(*p)->is_scavengable())  return;
2877    if (_ok) {
2878      _nm->print_nmethod(true);
2879      _ok = false;
2880    }
2881    tty->print_cr("*** scavengable oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2882                  p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2883    (*p)->print();
2884  }
2885  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2886};
2887
2888void nmethod::verify_scavenge_root_oops() {
2889  if (UseG1GC) {
2890    return;
2891  }
2892
2893  if (!on_scavenge_root_list()) {
2894    // Actually look inside, to verify the claim that it's clean.
2895    DebugScavengeRoot debug_scavenge_root(this);
2896    oops_do(&debug_scavenge_root);
2897    if (!debug_scavenge_root.ok())
2898      fatal("found an unadvertised bad scavengable oop in the code cache");
2899  }
2900  assert(scavenge_root_not_marked(), "");
2901}
2902
2903#endif // PRODUCT
2904
2905// Printing operations
2906
2907void nmethod::print() const {
2908  ResourceMark rm;
2909  ttyLocker ttyl;   // keep the following output all in one block
2910
2911  tty->print("Compiled method ");
2912
2913  if (is_compiled_by_c1()) {
2914    tty->print("(c1) ");
2915  } else if (is_compiled_by_c2()) {
2916    tty->print("(c2) ");
2917  } else if (is_compiled_by_shark()) {
2918    tty->print("(shark) ");
2919  } else if (is_compiled_by_jvmci()) {
2920    tty->print("(JVMCI) ");
2921  } else {
2922    tty->print("(nm) ");
2923  }
2924
2925  print_on(tty, NULL);
2926
2927  if (WizardMode) {
2928    tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
2929    tty->print(" for method " INTPTR_FORMAT , p2i(method()));
2930    tty->print(" { ");
2931    if (is_in_use())      tty->print("in_use ");
2932    if (is_not_entrant()) tty->print("not_entrant ");
2933    if (is_zombie())      tty->print("zombie ");
2934    if (is_unloaded())    tty->print("unloaded ");
2935    if (on_scavenge_root_list())  tty->print("scavenge_root ");
2936    tty->print_cr("}:");
2937  }
2938  if (size              () > 0) tty->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2939                                              p2i(this),
2940                                              p2i(this) + size(),
2941                                              size());
2942  if (relocation_size   () > 0) tty->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2943                                              p2i(relocation_begin()),
2944                                              p2i(relocation_end()),
2945                                              relocation_size());
2946  if (consts_size       () > 0) tty->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2947                                              p2i(consts_begin()),
2948                                              p2i(consts_end()),
2949                                              consts_size());
2950  if (insts_size        () > 0) tty->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2951                                              p2i(insts_begin()),
2952                                              p2i(insts_end()),
2953                                              insts_size());
2954  if (stub_size         () > 0) tty->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2955                                              p2i(stub_begin()),
2956                                              p2i(stub_end()),
2957                                              stub_size());
2958  if (oops_size         () > 0) tty->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2959                                              p2i(oops_begin()),
2960                                              p2i(oops_end()),
2961                                              oops_size());
2962  if (metadata_size      () > 0) tty->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2963                                              p2i(metadata_begin()),
2964                                              p2i(metadata_end()),
2965                                              metadata_size());
2966  if (scopes_data_size  () > 0) tty->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2967                                              p2i(scopes_data_begin()),
2968                                              p2i(scopes_data_end()),
2969                                              scopes_data_size());
2970  if (scopes_pcs_size   () > 0) tty->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2971                                              p2i(scopes_pcs_begin()),
2972                                              p2i(scopes_pcs_end()),
2973                                              scopes_pcs_size());
2974  if (dependencies_size () > 0) tty->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2975                                              p2i(dependencies_begin()),
2976                                              p2i(dependencies_end()),
2977                                              dependencies_size());
2978  if (handler_table_size() > 0) tty->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2979                                              p2i(handler_table_begin()),
2980                                              p2i(handler_table_end()),
2981                                              handler_table_size());
2982  if (nul_chk_table_size() > 0) tty->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2983                                              p2i(nul_chk_table_begin()),
2984                                              p2i(nul_chk_table_end()),
2985                                              nul_chk_table_size());
2986}
2987
2988void nmethod::print_code() {
2989  HandleMark hm;
2990  ResourceMark m;
2991  Disassembler::decode(this);
2992}
2993
2994
2995#ifndef PRODUCT
2996
2997void nmethod::print_scopes() {
2998  // For each PcDesc with debug information, print its scope chain (innermost to outermost).
2999  ResourceMark rm;
3000  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3001    if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
3002      continue;
3003
3004    ScopeDesc* sd = scope_desc_at(p->real_pc(this));
3005    while (sd != NULL) {
3006      sd->print_on(tty, p);
3007      sd = sd->sender();
3008    }
3009  }
3010}
3011
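// Print every dependency recorded for this nmethod and mirror each one into
// the xml log.  Dependencies that are actually registered with their context
// InstanceKlass are flagged with a "[nmethod<=klass]" line.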
3012void nmethod::print_dependencies() {
3013  ResourceMark rm;
3014  ttyLocker ttyl;   // keep the following output all in one block
3015  tty->print_cr("Dependencies:");
3016  for (Dependencies::DepStream deps(this); deps.next(); ) {
3017    deps.print_dependency();
3018    Klass* ctxk = deps.context_type();
3019    if (ctxk != NULL) {
3020      if (ctxk->oop_is_instance() && ((InstanceKlass*)ctxk)->is_dependent_nmethod(this)) {
3021        tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
3022      }
3023    }
3024    deps.log_dependency();  // put it into the xml log also
3025  }
3026}
3027
3028
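// Dump all relocation entries.  With UseRelocIndex the relocation section ends
// with an index (pairs of code offset and relocation offset, with the index
// size stored in the last word), which is decoded and printed as well.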
3029void nmethod::print_relocations() {
3030  ResourceMark m;       // in case methods get printed via the debugger
3031  tty->print_cr("relocations:");
3032  RelocIterator iter(this);
3033  iter.print();
3034  if (UseRelocIndex) {
3035    jint* index_end   = (jint*)relocation_end() - 1;
3036    jint  index_size  = *index_end;
3037    jint* index_start = (jint*)( (address)index_end - index_size );
3038    tty->print_cr("    index @" INTPTR_FORMAT ": index_size=%d", p2i(index_start), index_size);
3039    if (index_size > 0) {
3040      jint* ip;
3041      for (ip = index_start; ip+2 <= index_end; ip += 2)
3042        tty->print_cr("  (%d %d) addr=" INTPTR_FORMAT " @" INTPTR_FORMAT,
3043                      ip[0],
3044                      ip[1],
3045                      p2i(header_end()+ip[0]),
3046                      p2i(relocation_begin()-1+ip[1]));
3047      for (; ip < index_end; ip++)
3048        tty->print_cr("  (%d ?)", ip[0]);
3049      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", p2i(ip), *ip);
3050      ip++;
3051      tty->print_cr("reloc_end @" INTPTR_FORMAT ":", p2i(ip));
3052    }
3053  }
3054}
3055
3056
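// Print the pc-to-bytecode-offset table, one line per PcDesc.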
3057void nmethod::print_pcs() {
3058  ResourceMark m;       // in case methods get printed via the debugger
3059  tty->print_cr("pc-bytecode offsets:");
3060  for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
3061    p->print(this);
3062  }
3063}
3064
3065#endif // PRODUCT
3066
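// Return a printable name for the first relocation found in the given code
// range, e.g. "oop(...)", "runtime_call <blob name>" or "virtual_call".
// Returns "other" if only unrecognized relocation types are present, and NULL
// if the range contains no relocations at all.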
3067const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
3068  RelocIterator iter(this, begin, end);
3069  bool have_one = false;
3070  while (iter.next()) {
3071    have_one = true;
3072    switch (iter.type()) {
3073        case relocInfo::none:                  return "no_reloc";
3074        case relocInfo::oop_type: {
3075          stringStream st;
3076          oop_Relocation* r = iter.oop_reloc();
3077          oop obj = r->oop_value();
3078          st.print("oop(");
3079          if (obj == NULL) st.print("NULL");
3080          else obj->print_value_on(&st);
3081          st.print(")");
3082          return st.as_string();
3083        }
3084        case relocInfo::metadata_type: {
3085          stringStream st;
3086          metadata_Relocation* r = iter.metadata_reloc();
3087          Metadata* obj = r->metadata_value();
3088          st.print("metadata(");
3089          if (obj == NULL) st.print("NULL");
3090          else obj->print_value_on(&st);
3091          st.print(")");
3092          return st.as_string();
3093        }
3094        case relocInfo::runtime_call_type: {
3095          stringStream st;
3096          st.print("runtime_call");
3097          runtime_call_Relocation* r = iter.runtime_call_reloc();
3098          address dest = r->destination();
3099          CodeBlob* cb = CodeCache::find_blob(dest);
3100          if (cb != NULL) {
3101            st.print(" %s", cb->name());
3102          }
3103          return st.as_string();
3104        }
3105        case relocInfo::virtual_call_type:     return "virtual_call";
3106        case relocInfo::opt_virtual_call_type: return "optimized virtual_call";
3107        case relocInfo::static_call_type:      return "static_call";
3108        case relocInfo::static_stub_type:      return "static_stub";
3109        case relocInfo::external_word_type:    return "external_word";
3110        case relocInfo::internal_word_type:    return "internal_word";
3111        case relocInfo::section_word_type:     return "section_word";
3112        case relocInfo::poll_type:             return "poll";
3113        case relocInfo::poll_return_type:      return "poll_return";
3114        case relocInfo::type_mask:             return "type_bit_mask";
3115    }
3116  }
3117  return have_one ? "other" : NULL;
3118}
3119
3120// Return the last scope in (begin..end]
3121ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
3122  PcDesc* p = pc_desc_near(begin+1);
3123  if (p != NULL && p->real_pc(this) <= end) {
3124    return new ScopeDesc(this, p->scope_decode_offset(),
3125                         p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
3126                         p->return_oop());
3127  }
3128  return NULL;
3129}
3130
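// Emit the block labels ([Entry Point], [Verified Entry Point], [Stub Code],
// etc.) used to annotate the disassembly.  At the entry point this also
// describes where each incoming argument lives according to the Java calling
// convention, with lines roughly of the form (registers, symbol quoting and
// column positions are platform dependent):
//   # this:     <reg or [sp+offset]>  = <class name>
//   # parm0:    <reg or [sp+offset]>  = <type or class name>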
3131void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
3132  if (block_begin == entry_point())             stream->print_cr("[Entry Point]");
3133  if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
3134  if (JVMCI_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
3135  if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
3136  if (JVMCI_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
3137
3138  if (has_method_handle_invokes())
3139    if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
3140
3141  if (block_begin == consts_begin())            stream->print_cr("[Constants]");
3142
3143  if (block_begin == entry_point()) {
3144    methodHandle m = method();
3145    if (m.not_null()) {
3146      stream->print("  # ");
3147      m->print_value_on(stream);
3148      stream->cr();
3149    }
3150    if (m.not_null() && !is_osr_method()) {
3151      ResourceMark rm;
3152      int sizeargs = m->size_of_parameters();
3153      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
3154      VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
3155      {
3156        int sig_index = 0;
3157        if (!m->is_static())
3158          sig_bt[sig_index++] = T_OBJECT; // 'this'
3159        for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
3160          BasicType t = ss.type();
3161          sig_bt[sig_index++] = t;
3162          if (type2size[t] == 2) {
3163            sig_bt[sig_index++] = T_VOID;
3164          } else {
3165            assert(type2size[t] == 1, "size is 1 or 2");
3166          }
3167        }
3168        assert(sig_index == sizeargs, "");
3169      }
3170      const char* spname = "sp"; // make arch-specific?
3171      intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
3172      int stack_slot_offset = this->frame_size() * wordSize;
3173      int tab1 = 14, tab2 = 24;
3174      int sig_index = 0;
3175      int arg_index = (m->is_static() ? 0 : -1);
3176      bool did_old_sp = false;
3177      for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
3178        bool at_this = (arg_index == -1);
3179        bool at_old_sp = false;
3180        BasicType t = (at_this ? T_OBJECT : ss.type());
3181        assert(t == sig_bt[sig_index], "sigs in sync");
3182        if (at_this)
3183          stream->print("  # this: ");
3184        else
3185          stream->print("  # parm%d: ", arg_index);
3186        stream->move_to(tab1);
3187        VMReg fst = regs[sig_index].first();
3188        VMReg snd = regs[sig_index].second();
3189        if (fst->is_reg()) {
3190          stream->print("%s", fst->name());
3191          if (snd->is_valid())  {
3192            stream->print(":%s", snd->name());
3193          }
3194        } else if (fst->is_stack()) {
3195          int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
3196          if (offset == stack_slot_offset)  at_old_sp = true;
3197          stream->print("[%s+0x%x]", spname, offset);
3198        } else {
3199          stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
3200        }
3201        stream->print(" ");
3202        stream->move_to(tab2);
3203        stream->print("= ");
3204        if (at_this) {
3205          m->method_holder()->print_value_on(stream);
3206        } else {
3207          bool did_name = false;
3208          if (!at_this && ss.is_object()) {
3209            Symbol* name = ss.as_symbol_or_null();
3210            if (name != NULL) {
3211              name->print_value_on(stream);
3212              did_name = true;
3213            }
3214          }
3215          if (!did_name)
3216            stream->print("%s", type2name(t));
3217        }
3218        if (at_old_sp) {
3219          stream->print("  (%s of caller)", spname);
3220          did_old_sp = true;
3221        }
3222        stream->cr();
3223        sig_index += type2size[t];
3224        arg_index += 1;
3225        if (!at_this)  ss.next();
3226      }
3227      if (!did_old_sp) {
3228        stream->print("  # ");
3229        stream->move_to(tab1);
3230        stream->print("[%s+0x%x]", spname, stack_slot_offset);
3231        stream->print("  (%s of caller)", spname);
3232        stream->cr();
3233      }
3234    }
3235  }
3236}
3237
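// Annotate one disassembled instruction range at the given column: print any
// oop map whose pc lies in (begin, end], the bytecode and scope chain the
// range belongs to, relocation info, and the target of an implicit-exception
// dispatch, if any.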
3238void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
3239  // First, find an oopmap in (begin, end].
3240  // We use the odd half-closed interval so that oop maps and scope descs
3241  // which are tied to the byte after a call are printed with the call itself.
3242  address base = code_begin();
3243  ImmutableOopMapSet* oms = oop_maps();
3244  if (oms != NULL) {
3245    for (int i = 0, imax = oms->count(); i < imax; i++) {
3246      const ImmutableOopMapPair* pair = oms->pair_at(i);
3247      const ImmutableOopMap* om = pair->get_from(oms);
3248      address pc = base + pair->pc_offset();
3249      if (pc > begin) {
3250        if (pc <= end) {
3251          st->move_to(column);
3252          st->print("; ");
3253          om->print_on(st);
3254        }
3255        break;
3256      }
3257    }
3258  }
3259
3260  // Print any debug info present at this pc.
3261  ScopeDesc* sd  = scope_desc_in(begin, end);
3262  if (sd != NULL) {
3263    st->move_to(column);
3264    if (sd->bci() == SynchronizationEntryBCI) {
3265      st->print(";*synchronization entry");
3266    } else {
3267      if (sd->method() == NULL) {
3268        st->print("method is NULL");
3269      } else if (sd->method()->is_native()) {
3270        st->print("method is native");
3271      } else {
3272        Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
3273        st->print(";*%s", Bytecodes::name(bc));
3274        switch (bc) {
3275        case Bytecodes::_invokevirtual:
3276        case Bytecodes::_invokespecial:
3277        case Bytecodes::_invokestatic:
3278        case Bytecodes::_invokeinterface:
3279          {
3280            Bytecode_invoke invoke(sd->method(), sd->bci());
3281            st->print(" ");
3282            if (invoke.name() != NULL)
3283              invoke.name()->print_symbol_on(st);
3284            else
3285              st->print("<UNKNOWN>");
3286            break;
3287          }
3288        case Bytecodes::_getfield:
3289        case Bytecodes::_putfield:
3290        case Bytecodes::_getstatic:
3291        case Bytecodes::_putstatic:
3292          {
3293            Bytecode_field field(sd->method(), sd->bci());
3294            st->print(" ");
3295            if (field.name() != NULL)
3296              field.name()->print_symbol_on(st);
3297            else
3298              st->print("<UNKNOWN>");
3299          }
3300        }
3301      }
3302      st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
3303    }
3304
3305    // Print all scopes
3306    for (;sd != NULL; sd = sd->sender()) {
3307      st->move_to(column);
3308      st->print("; -");
3309      if (sd->method() == NULL) {
3310        st->print("method is NULL");
3311      } else {
3312        sd->method()->print_short_name(st);
3313        int lineno = sd->method()->line_number_from_bci(sd->bci());
3314        if (lineno != -1) {
3315          st->print("@%d (line %d)", sd->bci(), lineno);
3316        } else {
3317          st->print("@%d", sd->bci());
3318        }
3319      }
3320      st->cr();
3321    }
3322  }
3323
3324  // Print relocation information
3325  const char* str = reloc_string_for(begin, end);
3326  if (str != NULL) {
3327    if (sd != NULL) st->cr();
3328    st->move_to(column);
3329    st->print(";   {%s}", str);
3330  }
3331  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
3332  if (cont_offset != 0) {
3333    st->move_to(column);
3334    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
3335  }
3336
3337}
3338
3339#ifndef PRODUCT
3340
3341void nmethod::print_value_on(outputStream* st) const {
3342  st->print("nmethod");
3343  print_on(st, NULL);
3344}
3345
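// Print every compiled call site recorded in the relocations: inline caches
// (virtual and optimized virtual calls) and static calls.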
3346void nmethod::print_calls(outputStream* st) {
3347  RelocIterator iter(this);
3348  while (iter.next()) {
3349    switch (iter.type()) {
3350    case relocInfo::virtual_call_type:
3351    case relocInfo::opt_virtual_call_type: {
3352      VerifyMutexLocker mc(CompiledIC_lock);
3353      CompiledIC_at(&iter)->print();
3354      break;
3355    }
3356    case relocInfo::static_call_type:
3357      st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
3358      compiledStaticCall_at(iter.reloc())->print();
3359      break;
3360    }
3361  }
3362}
3363
3364void nmethod::print_handler_table() {
3365  ExceptionHandlerTable(this).print();
3366}
3367
3368void nmethod::print_nul_chk_table() {
3369  ImplicitExceptionTable(this).print(code_begin());
3370}
3371
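// Print the accumulated nmethod statistics per compiler, plus debug-info,
// PcDesc and dependency statistics, bracketed by a <statistics> element when
// xml logging is enabled.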
3372void nmethod::print_statistics() {
3373  ttyLocker ttyl;
3374  if (xtty != NULL)  xtty->head("statistics type='nmethod'");
3375  native_nmethod_stats.print_native_nmethod_stats();
3376#ifdef COMPILER1
3377  c1_java_nmethod_stats.print_nmethod_stats("C1");
3378#endif
3379#ifdef COMPILER2
3380  c2_java_nmethod_stats.print_nmethod_stats("C2");
3381#endif
3382#if INCLUDE_JVMCI
3383  jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
3384#endif
3385#ifdef SHARK
3386  shark_java_nmethod_stats.print_nmethod_stats("Shark");
3387#endif
3388  unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
3389  DebugInformationRecorder::print_statistics();
3390#ifndef PRODUCT
3391  pc_nmethod_stats.print_pc_stats();
3392#endif
3393  Dependencies::print_statistics();
3394  if (xtty != NULL)  xtty->tail("statistics");
3395}
3396
3397#endif // !PRODUCT
3398
3399#if INCLUDE_JVMCI
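// Copy the name of the InstalledCode object backing this JVMCI nmethod into
// buf and return buf.  The copied name is "null" if the object has no name and
// "noInstalledCode" if there is no such object; NULL is returned if this
// nmethod was not produced by JVMCI.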
3400char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
3401  if (!this->is_compiled_by_jvmci()) {
3402    return NULL;
3403  }
3404  oop installedCode = this->jvmci_installed_code();
3405  if (installedCode != NULL) {
3406    oop installedCodeName = NULL;
3407    if (installedCode->is_a(InstalledCode::klass())) {
3408      installedCodeName = InstalledCode::name(installedCode);
3409    }
3410    if (installedCodeName != NULL) {
3411      return java_lang_String::as_utf8_string(installedCodeName, buf, (int)buflen);
3412    } else {
3413      jio_snprintf(buf, buflen, "null");
3414      return buf;
3415    }
3416  }
3417  jio_snprintf(buf, buflen, "noInstalledCode");
3418  return buf;
3419}
3420#endif
3421